language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydantic__pydantic | pydantic/v1/config.py | {
"start": 915,
"end": 2479
} | class ____(str, Enum):
allow = 'allow'
ignore = 'ignore'
forbid = 'forbid'
# https://github.com/cython/cython/issues/4003
# Fixed in Cython 3 and Pydantic v1 won't support Cython 3.
# Pydantic v2 doesn't depend on Cython at all.
if not compiled:
from typing_extensions import TypedDict
class ConfigDict(TypedDict, total=False):
title: Optional[str]
anystr_lower: bool
anystr_strip_whitespace: bool
min_anystr_length: int
max_anystr_length: Optional[int]
validate_all: bool
extra: Extra
allow_mutation: bool
frozen: bool
allow_population_by_field_name: bool
use_enum_values: bool
fields: Dict[str, Union[str, Dict[str, str]]]
validate_assignment: bool
error_msg_templates: Dict[str, str]
arbitrary_types_allowed: bool
orm_mode: bool
getter_dict: Type[GetterDict]
alias_generator: Optional[Callable[[str], str]]
keep_untouched: Tuple[type, ...]
schema_extra: Union[Dict[str, object], 'SchemaExtraCallable']
json_loads: Callable[[str], object]
json_dumps: AnyArgTCallable[str]
json_encoders: Dict[Type[object], AnyCallable]
underscore_attrs_are_private: bool
allow_inf_nan: bool
copy_on_model_validation: Literal['none', 'deep', 'shallow']
# whether dataclass `__post_init__` should be run after validation
post_init_call: Literal['before_validation', 'after_validation']
else:
ConfigDict = dict # type: ignore
| Extra |
python | pytorch__pytorch | test/distributed/tensor/parallel/test_tp_style.py | {
"start": 900,
"end": 17489
} | class ____(DTensorTestBase):
@property
def world_size(self):
return NUM_DEVICES
@with_comms
def test_colwise_parallel_style(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
tensor = torch.rand(8, 16, device=self.device_type, requires_grad=True)
model = nn.Linear(16, 16, device=self.device_type)
default_col_parallel = ColwiseParallel()
colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
with comm_mode:
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8, 16 // self.world_size))
# ensure no communication happened in fwd
self.assertEqual(comm_mode.get_total_counts(), 0)
out.sum().backward()
# allreduce in bwd
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 1)
self.assertEqual(comm_mode.get_total_counts(), 1)
sharded_col_parallel = ColwiseParallel(input_layouts=Shard(0))
colwise_mod = parallelize_module(deepcopy(model), mesh, sharded_col_parallel)
with comm_mode:
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8 * self.world_size, 16 // self.world_size))
# allgather in fwd
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
)
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# reduce_scatter in bwd
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
)
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
def test_colwise_parallel_embedding(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
tensor = torch.arange(8, device=self.device_type).reshape(4, 2)
model = nn.Embedding(16, 16, device=self.device_type)
default_col_parallel = ColwiseParallel()
colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
with comm_mode:
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16 // self.world_size))
# ensure no communication happened in fwd
self.assertEqual(comm_mode.get_total_counts(), 0)
out.sum().backward()
# no comm in bwd
self.assertEqual(comm_mode.get_total_counts(), 0)
@with_comms
def test_rowwise_parallel_style(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
tensor = torch.rand(
8, 16 // self.world_size, device=self.device_type, requires_grad=True
)
model = nn.Linear(16, 16, device=self.device_type)
default_row_parallel = RowwiseParallel()
rowwise_mod = parallelize_module(deepcopy(model), mesh, default_row_parallel)
with comm_mode:
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8, 16))
# allreduce in fwd
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 1)
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# no op in bwd
self.assertEqual(comm_mode.get_total_counts(), 1)
sharded_row_parallel = RowwiseParallel(output_layouts=Shard(0))
rowwise_mod = parallelize_module(deepcopy(model), mesh, sharded_row_parallel)
with comm_mode:
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8 // self.world_size, 16))
# reduce_scatter in fwd
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
)
self.assertEqual(comm_mode.get_total_counts(), 1)
out.sum().backward()
# allgather in bwd
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
)
self.assertEqual(comm_mode.get_total_counts(), 2)
@with_comms
def test_rowwise_parallel_embedding(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
comm_mode = CommDebugMode()
tensor = torch.arange(8, device=self.device_type).reshape(4, 2)
model = nn.Embedding(16, 16, device=self.device_type)
rowwise_mod = parallelize_module(
deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate())
)
with comm_mode:
out = rowwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16))
# ensure allreduce communication happened in fwd
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 1)
out.sum().backward()
# no comm in bwd
self.assertEqual(comm_mode.get_total_counts(), 1)
sharded_row_parallel = RowwiseParallel(
input_layouts=Replicate(), output_layouts=Shard(1)
)
rowwise_mod = parallelize_module(deepcopy(model), mesh, sharded_row_parallel)
inp_indices = torch.arange(8, device=self.device_type)
with comm_mode:
out = rowwise_mod(inp_indices)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8, 16 // self.world_size))
# reduce scatter in fwd
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.reduce_scatter_tensor], 1
)
out.sum().backward()
# allgather comm in bwd
self.assertEqual(comm_mode.get_total_counts(), 2)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
)
@with_comms
def test_prepare_module_input(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
tensor = torch.ones(2, 16, device=self.device_type)
expected_tensor = torch.ones(2 * self.world_size, 16, device=self.device_type)
prepare_inp_style = PrepareModuleInput(
input_layouts=Shard(0), desired_input_layouts=Replicate()
)
model = nn.Identity()
allgather_mod = parallelize_module(model, mesh, prepare_inp_style)
output = allgather_mod(tensor).full_tensor()
self.assertEqual(output, expected_tensor)
@with_comms
def test_prepare_module_input_multiple_inputs(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(8, 8)
def forward(self, x, y):
return self.linear(x) + y
# Raise assertion error if input_layouts and desired_input_layouts do not have same length.
test_mod = TestModule().to(self.device_type)
with self.assertRaisesRegex(
AssertionError,
"input_layouts and desired_input_layouts should have same length!",
):
PrepareModuleInput(
input_layouts=Shard(0), desired_input_layouts=(Replicate(), None)
)
# Raise assertion error if module inputs and input_layouts do not have same length.
prepare_inps_short_dimension = PrepareModuleInput(
input_layouts=Shard(0), desired_input_layouts=Replicate()
)
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps_short_dimension)
with self.assertRaisesRegex(
ValueError, "module inputs and input_layouts should have same length!"
):
output = test_mod(
torch.randn(2, 8, device=self.device_type),
torch.ones(
self.world_size * 2, 8 // self.world_size, device=self.device_type
),
)
test_mod = TestModule().to(self.device_type)
prepare_inps = PrepareModuleInput(
input_layouts=(Shard(0), None), desired_input_layouts=(Replicate(), None)
)
parallelize_module(test_mod.linear, mesh, ColwiseParallel())
parallelize_module(test_mod, mesh, prepare_inps)
output = test_mod(
torch.randn(2, 8, device=self.device_type),
torch.ones(
self.world_size * 2, 8 // self.world_size, device=self.device_type
),
)
self.assertEqual(output.shape, (self.world_size * 2, 8 // self.world_size))
@with_comms
def test_prepare_module_kwargs_input(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
class TestKwargModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(8, 8)
def forward(self, x, *, y, z=2):
return self.linear(x) + y + z
test_mod = TestKwargModule().to(self.device_type)
prepare_inps_simple = PrepareModuleInput(
input_kwarg_layouts={"y": Shard(0)},
desired_input_kwarg_layouts={"y": Replicate()},
)
parallelize_module(
test_mod.linear, mesh, ColwiseParallel(use_local_output=False)
)
parallelize_module(test_mod, mesh, prepare_inps_simple)
comm_mode = CommDebugMode()
with comm_mode:
output = test_mod(
torch.randn(1 * self.world_size, 8, device=self.device_type),
y=torch.ones(1, 8, device=self.device_type),
)
self.assertEqual(comm_mode.get_total_counts(), 1)
self.assertEqual(output.shape, (1 * self.world_size, 8))
class TestKwargOnlyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(8, 8)
def forward(self, *, x, y=2, z=None):
return self.linear(x) + y + z
test_kwonly_mod = TestKwargOnlyModule().to(self.device_type)
prepare_inps_simple = PrepareModuleInput(
input_kwarg_layouts={"x": Shard(0), "z": Shard(0)},
desired_input_kwarg_layouts={"x": Replicate(), "z": Replicate()},
)
parallelize_module(
test_kwonly_mod.linear, mesh, ColwiseParallel(use_local_output=False)
)
parallelize_module(test_kwonly_mod, mesh, prepare_inps_simple)
with comm_mode:
output = test_kwonly_mod(
x=torch.randn(1, 8, device=self.device_type),
z=torch.ones(1, 8, device=self.device_type),
)
self.assertEqual(comm_mode.get_total_counts(), 2)
self.assertEqual(output.shape, (1 * self.world_size, 8))
# test the case where x is a DTensor
x_dt = DTensor.from_local(
torch.randn(1, 8, device=self.device_type), mesh, [Shard(0)]
)
with comm_mode:
output = test_kwonly_mod(
x=x_dt, z=torch.ones(1, 8, device=self.device_type)
)
self.assertEqual(comm_mode.get_total_counts(), 2)
self.assertEqual(output.shape, (1 * self.world_size, 8))
@with_comms
def test_prepare_module_output(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
tensor = torch.ones(8, 16, device=self.device_type)
expected_tensor = torch.ones(8 // self.world_size, 16, device=self.device_type)
prepare_out_style = PrepareModuleOutput(
output_layouts=Replicate(), desired_output_layouts=Shard(0)
)
model = nn.Identity()
chunk_mod = parallelize_module(model, mesh, prepare_out_style)
output = chunk_mod(tensor)
self.assertEqual(output, expected_tensor)
@with_comms
def test_sequence_parallel_style(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
# early init RNG tracker
torch.distributed.tensor._random.manual_seed(0, mesh)
comm_mode = CommDebugMode()
batch, N, embedding_dim = 20, 8, 12
global_input = torch.rand(
batch,
N * self.world_size,
embedding_dim,
device=self.device_type,
requires_grad=True,
)
sharded_input = distribute_tensor(global_input, mesh, [Shard(1)])
# test LayerNorm
for elementwise_affine in [True, False]:
norm = nn.LayerNorm(
embedding_dim,
elementwise_affine=elementwise_affine,
device=self.device_type,
)
sp_norm = parallelize_module(deepcopy(norm), mesh, SequenceParallel())
output = norm(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_norm(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0
)
if elementwise_affine:
self.assertEqual(sp_norm.weight.grad.placements, (_Partial(),))
self.assertEqual(sp_norm.bias.grad.placements, (_Partial(),))
self.assertEqual(sharded_out.full_tensor(), output)
# test RMSNorm
rmsnorm = RMSNormPython(embedding_dim).to(self.device_type)
sp_rmsnorm = parallelize_module(deepcopy(rmsnorm), mesh, SequenceParallel())
output = rmsnorm(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_rmsnorm(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(sp_rmsnorm.weight.grad.placements, (_Partial(),))
self.assertEqual(comm_mode.get_total_counts(), 0)
self.assertEqual(comm_mode.get_comm_counts()[c10d_functional.all_reduce], 0)
self.assertEqual(sharded_out.full_tensor(), output)
# test dropout
dropout = nn.Dropout(0.5).to(self.device_type)
sp_dropout = parallelize_module(deepcopy(dropout), mesh, SequenceParallel())
output = dropout(global_input)
output.sum().backward()
with comm_mode:
sharded_out = sp_dropout(sharded_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(comm_mode.get_total_counts(), 0)
# test sharded on non-sequence dim input
sharded_batch_input = distribute_tensor(global_input, mesh, [Shard(0)])
rmsnorm = RMSNormPython(embedding_dim).to(self.device_type)
sp_rmsnorm = parallelize_module(deepcopy(rmsnorm), mesh, SequenceParallel())
with comm_mode:
sharded_out = sp_rmsnorm(sharded_batch_input)
grad_out = torch.ones_like(sharded_out)
sharded_out.backward(grad_out)
self.assertIsInstance(sharded_out, DTensor)
# output still sharded on sequence dimension
self.assertEqual(sharded_out.placements, (Shard(1),))
self.assertEqual(sp_rmsnorm.weight.grad.placements, (_Partial(),))
# communication happens in both fwd/bwd to redistribute input
self.assertEqual(comm_mode.get_total_counts(), 2)
if __name__ == "__main__":
run_tests()
| TensorParallelStyleTest |
python | ansible__ansible | test/units/plugins/cache/test_cache.py | {
"start": 1029,
"end": 3305
} | class ____(unittest.TestCase):
def setUp(self):
# memory plugin cache
self.cache = CachePluginAdjudicator()
self.cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
self.cache['cache_key_2'] = {'key': 'value'}
def test___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']}
def test_inner___setitem__(self):
self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
self.cache['new_cache_key']['new_key1'][0] = 'updated_value1'
assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']}
def test___contains__(self):
assert 'cache_key' in self.cache
assert 'not_cache_key' not in self.cache
def test_get(self):
assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'}
def test_get_with_default(self):
assert self.cache.get('foo', 'bar') == 'bar'
def test_get_without_default(self):
assert self.cache.get('foo') is None
def test___getitem__(self):
with pytest.raises(KeyError):
self.cache['foo'] # pylint: disable=pointless-statement
def test_pop_with_default(self):
assert self.cache.pop('foo', 'bar') == 'bar'
def test_pop_without_default(self):
with pytest.raises(KeyError):
self.cache.pop('foo')
def test_pop(self):
v = self.cache.pop('cache_key_2')
assert v == {'key': 'value'}
assert 'cache_key_2' not in self.cache
def test_update(self):
self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
assert self.cache['cache_key']['key2'] == 'updatedvalue'
def test_update_cache_if_changed(self):
# Changes are stored in the CachePluginAdjudicator and will be
# persisted to the plugin when calling update_cache_if_changed()
# The exception is flush which flushes the plugin immediately.
assert len(self.cache.keys()) == 2
assert len(self.cache._plugin.keys()) == 0
self.cache.update_cache_if_changed()
assert len(self.cache._plugin.keys()) == 2
| TestCachePluginAdjudicator |
python | encode__django-rest-framework | tests/test_exceptions.py | {
"start": 1610,
"end": 2692
} | class ____(TestCase):
def test_eq(self):
assert ErrorDetail('msg') == ErrorDetail('msg')
assert ErrorDetail('msg', 'code') == ErrorDetail('msg', code='code')
assert ErrorDetail('msg') == 'msg'
assert ErrorDetail('msg', 'code') == 'msg'
def test_ne(self):
assert ErrorDetail('msg1') != ErrorDetail('msg2')
assert ErrorDetail('msg') != ErrorDetail('msg', code='invalid')
assert ErrorDetail('msg1') != 'msg2'
assert ErrorDetail('msg1', 'code') != 'msg2'
def test_repr(self):
assert repr(ErrorDetail('msg1')) == \
'ErrorDetail(string={!r}, code=None)'.format('msg1')
assert repr(ErrorDetail('msg1', 'code')) == \
'ErrorDetail(string={!r}, code={!r})'.format('msg1', 'code')
def test_str(self):
assert str(ErrorDetail('msg1')) == 'msg1'
assert str(ErrorDetail('msg1', 'code')) == 'msg1'
def test_hash(self):
assert hash(ErrorDetail('msg')) == hash('msg')
assert hash(ErrorDetail('msg', 'code')) == hash('msg')
| ErrorDetailTests |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 4452,
"end": 9022
} | class ____(Base):
"""
DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table.
"""
__tablename__ = "runs"
run_uuid = Column(String(32), nullable=False)
"""
Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table.
"""
name = Column(String(250))
"""
Run name: `String` (limit 250 characters).
"""
source_type = Column(String(20), default=SourceType.to_string(SourceType.LOCAL))
"""
Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``,
``LOCAL`` (default), or ``UNKNOWN``.
"""
source_name = Column(String(500))
"""
Name of source recording the run: `String` (limit 500 characters).
"""
entry_point_name = Column(String(50))
"""
Entry-point name that launched the run run: `String` (limit 50 characters).
"""
user_id = Column(String(256), nullable=True, default=None)
"""
User ID: `String` (limit 256 characters). Defaults to ``null``.
"""
status = Column(String(20), default=RunStatus.to_string(RunStatus.SCHEDULED))
"""
Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default),
``FINISHED``, ``FAILED``.
"""
start_time = Column(BigInteger, default=get_current_time_millis)
"""
Run start time: `BigInteger`. Defaults to current system time.
"""
end_time = Column(BigInteger, nullable=True, default=None)
"""
Run end time: `BigInteger`.
"""
deleted_time = Column(BigInteger, nullable=True, default=None)
"""
Run deleted time: `BigInteger`. Timestamp of when run is deleted, defaults to none.
"""
source_version = Column(String(50))
"""
Source version: `String` (limit 50 characters).
"""
lifecycle_stage = Column(String(20), default=LifecycleStage.ACTIVE)
"""
Lifecycle Stage of run: `String` (limit 32 characters).
Can be either ``active`` (default) or ``deleted``.
"""
artifact_uri = Column(String(200), default=None)
"""
Default artifact location for this run: `String` (limit 200 characters).
"""
experiment_id = Column(Integer, ForeignKey("experiments.experiment_id"))
"""
Experiment ID to which this run belongs to: *Foreign Key* into ``experiment`` table.
"""
experiment = relationship("SqlExperiment", backref=backref("runs", cascade="all"))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`.
"""
__table_args__ = (
CheckConstraint(source_type.in_(SourceTypes), name="source_type"),
CheckConstraint(status.in_(RunStatusTypes), name="status"),
CheckConstraint(
lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),
name="runs_lifecycle_stage",
),
PrimaryKeyConstraint("run_uuid", name="run_pk"),
)
@staticmethod
def get_attribute_name(mlflow_attribute_name):
"""
Resolves an MLflow attribute name to a `SqlRun` attribute name.
"""
# Currently, MLflow Search attributes defined in `SearchUtils.VALID_SEARCH_ATTRIBUTE_KEYS`
# share the same names as their corresponding `SqlRun` attributes. Therefore, this function
# returns the same attribute name
return {"run_name": "name", "run_id": "run_uuid"}.get(
mlflow_attribute_name, mlflow_attribute_name
)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
mlflow.entities.Run: Description of the return value.
"""
run_info = RunInfo(
run_id=self.run_uuid,
run_name=self.name,
experiment_id=str(self.experiment_id),
user_id=self.user_id,
status=self.status,
start_time=self.start_time,
end_time=self.end_time,
lifecycle_stage=self.lifecycle_stage,
artifact_uri=self.artifact_uri,
)
tags = [t.to_mlflow_entity() for t in self.tags]
run_data = RunData(
metrics=[m.to_mlflow_entity() for m in self.latest_metrics],
params=[p.to_mlflow_entity() for p in self.params],
tags=tags,
)
if not run_info.run_name:
if run_name := _get_run_name_from_tags(tags):
run_info._set_run_name(run_name)
return Run(run_info=run_info, run_data=run_data)
| SqlRun |
python | apache__airflow | providers/alibaba/tests/unit/alibaba/cloud/operators/test_analyticdb_spark.py | {
"start": 4784,
"end": 6568
} | class ____:
@mock.patch(ADB_SPARK_OPERATOR_STRING.format("AnalyticDBSparkHook"))
def test_execute(self, mock_hook):
"""Test submit AnalyticDB Spark SQL Application works as expected."""
operator = AnalyticDBSparkSQLOperator(
sql=MOCK_SQL,
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID,
region=MOCK_REGION,
task_id=MOCK_TASK_ID,
)
operator.execute(None)
mock_hook.assert_called_once_with(adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID, region=MOCK_REGION)
mock_hook.return_value.submit_spark_sql.assert_called_once_with(
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
sql=MOCK_SQL,
conf=None,
driver_resource_spec=None,
executor_resource_spec=None,
num_executors=None,
name=None,
)
@mock.patch(ADB_SPARK_OPERATOR_STRING.format("AnalyticDBSparkBaseOperator.hook"))
def test_execute_with_exception(self, mock_hook):
"""Test submit AnalyticDB Spark SQL Application raises ValueError with invalid parameter."""
# Given
mock_hook.submit_spark_sql.side_effect = ValueError("List of strings expected")
# When
operator = AnalyticDBSparkSQLOperator(
sql=MOCK_SQL,
conf={"spark.eventLog.enabled": True},
cluster_id=MOCK_CLUSTER_ID,
rg_name=MOCK_RG_NAME,
adb_spark_conn_id=MOCK_ADB_SPARK_CONN_ID,
region=MOCK_REGION,
task_id=MOCK_TASK_ID,
)
with pytest.raises(ValueError, match="List of strings expected"):
operator.execute(None)
| TestAnalyticDBSparklSQLOperator |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 12335,
"end": 12718
} | class ____(MetricOperationDefinition):
can_orderby: bool
can_groupby: bool = False
can_filter: bool = False
meta_type: str | None = None
post_query_func: Callable[..., PostQueryFuncReturnType] = lambda data, *args: data
snql_func: Callable[..., Function | None] = lambda _: None
default_null_value: int | list[tuple[float]] | None = None
| DerivedOpDefinition |
python | pydata__xarray | xarray/core/indexing.py | {
"start": 22288,
"end": 23456
} | class ____(NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
__slots__ = ("array", "indexer_cls")
def __init__(self, array, indexer_cls: type[ExplicitIndexer] = BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(
self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
) -> np.ndarray:
if Version(np.__version__) >= Version("2.0.0"):
return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy)
else:
return np.asarray(self.get_duck_array(), dtype=dtype)
def get_duck_array(self):
return self.array.get_duck_array()
def __getitem__(self, key: Any):
key = expanded_indexer(key, self.ndim)
indexer = self.indexer_cls(key)
result = apply_indexer(self.array, indexer)
if isinstance(result, ExplicitlyIndexed):
return type(self)(result, self.indexer_cls)
else:
# Sometimes explicitly indexed arrays return NumPy arrays or
# scalars.
return result
| ImplicitToExplicitIndexingAdapter |
python | encode__django-rest-framework | rest_framework/authentication.py | {
"start": 1488,
"end": 3639
} | class ____(BaseAuthentication):
"""
HTTP Basic authentication against username/password.
"""
www_authenticate_realm = 'api'
def authenticate(self, request):
"""
Returns a `User` if a correct username and password have been supplied
using HTTP Basic authentication. Otherwise returns `None`.
"""
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b'basic':
return None
if len(auth) == 1:
msg = _('Invalid basic header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid basic header. Credentials string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
try:
try:
auth_decoded = base64.b64decode(auth[1]).decode('utf-8')
except UnicodeDecodeError:
auth_decoded = base64.b64decode(auth[1]).decode('latin-1')
userid, password = auth_decoded.split(':', 1)
except (TypeError, ValueError, UnicodeDecodeError, binascii.Error):
msg = _('Invalid basic header. Credentials not correctly base64 encoded.')
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(userid, password, request)
def authenticate_credentials(self, userid, password, request=None):
"""
Authenticate the userid and password against username and password
with optional request for context.
"""
credentials = {
get_user_model().USERNAME_FIELD: userid,
'password': password
}
user = authenticate(request=request, **credentials)
if user is None:
raise exceptions.AuthenticationFailed(_('Invalid username/password.'))
if not user.is_active:
raise exceptions.AuthenticationFailed(_('User inactive or deleted.'))
return (user, None)
def authenticate_header(self, request):
return 'Basic realm="%s"' % self.www_authenticate_realm
| BasicAuthentication |
python | sphinx-doc__sphinx | sphinx/pycode/parser.py | {
"start": 18746,
"end": 21666
} | class ____(TokenProcessor):
"""Python source code parser to detect location of functions,
classes and methods.
"""
def __init__(self, lines: list[str]) -> None:
super().__init__(lines)
self.decorator: Token | None = None
self.context: list[str] = []
self.indents: list[tuple[str, str | None, int | None]] = []
self.definitions: dict[str, tuple[str, int, int]] = {}
def add_definition(self, name: str, entry: tuple[str, int, int]) -> None:
"""Add a location of definition."""
if self.indents and self.indents[-1][0] == entry[0] == 'def':
# ignore definition of inner function
pass
else:
self.definitions[name] = entry
def parse(self) -> None:
"""Parse the code to obtain location of definitions."""
while True:
token = self.fetch_token()
if token is None:
break
if token == COMMENT:
pass
elif token == [OP, '@'] and (
self.previous is None
or self.previous.match(NEWLINE, NL, INDENT, DEDENT)
):
if self.decorator is None:
self.decorator = token
elif token.match([NAME, 'class']):
self.parse_definition('class')
elif token.match([NAME, 'def']):
self.parse_definition('def')
elif token == INDENT:
self.indents.append(('other', None, None))
elif token == DEDENT:
self.finalize_block()
def parse_definition(self, typ: str) -> None:
"""Parse AST of definition."""
name = self.fetch_token()
self.context.append(name.value) # type: ignore[union-attr]
funcname = '.'.join(self.context)
if self.decorator:
start_pos = self.decorator.start[0]
self.decorator = None
else:
start_pos = name.start[0] # type: ignore[union-attr]
self.fetch_until([OP, ':'])
if self.fetch_token().match(COMMENT, NEWLINE): # type: ignore[union-attr]
self.fetch_until(INDENT)
self.indents.append((typ, funcname, start_pos))
else:
# one-liner
self.add_definition(funcname, (typ, start_pos, name.end[0])) # type: ignore[union-attr]
self.context.pop()
def finalize_block(self) -> None:
"""Finalize definition block."""
definition = self.indents.pop()
if definition[0] != 'other':
typ, funcname, start_pos = definition
end_pos = self.current.end[0] - 1 # type: ignore[union-attr]
while emptyline_re.match(self.get_line(end_pos)):
end_pos -= 1
self.add_definition(funcname, (typ, start_pos, end_pos)) # type: ignore[arg-type]
self.context.pop()
| DefinitionFinder |
python | facebookresearch__faiss | faiss/gpu/test/torch_test_contrib_gpu.py | {
"start": 9372,
"end": 14059
} | class ____(unittest.TestCase):
def test_knn_gpu(self, use_cuvs=False):
torch.manual_seed(10)
d = 32
nb = 1024
nq = 10
k = 10
res = faiss.StandardGpuResources()
# make GT on torch cpu and test using IndexFlatL2
xb = torch.rand(nb, d, dtype=torch.float32)
xq = torch.rand(nq, d, dtype=torch.float32)
index = faiss.IndexFlatL2(d)
index.add(xb)
gt_D, gt_I = index.search(xq, k)
# for the GPU, we'll use a non-default stream
s = torch.cuda.Stream()
with torch.cuda.stream(s):
# test numpy inputs
xb_np = xb.numpy()
xq_np = xq.numpy()
for xq_row_major in True, False:
for xb_row_major in True, False:
if not xq_row_major:
xq_c = to_column_major_numpy(xq_np)
assert not xq_c.flags.contiguous
else:
xq_c = xq_np
if not xb_row_major:
xb_c = to_column_major_numpy(xb_np)
assert not xb_c.flags.contiguous
else:
xb_c = xb_np
D, I = faiss.knn_gpu(res, xq_c, xb_c, k, use_cuvs=use_cuvs)
self.assertTrue(torch.equal(torch.from_numpy(I), gt_I))
self.assertLess((torch.from_numpy(D) - gt_D).abs().max(), 1e-4)
# test torch (cpu, gpu) inputs
for is_cuda in True, False:
for xq_row_major in True, False:
for xb_row_major in True, False:
if is_cuda:
xq_c = xq.cuda()
xb_c = xb.cuda()
else:
# also test torch cpu tensors
xq_c = xq
xb_c = xb
if not xq_row_major:
xq_c = to_column_major_torch(xq)
assert not xq_c.is_contiguous()
if not xb_row_major:
xb_c = to_column_major_torch(xb)
assert not xb_c.is_contiguous()
D, I = faiss.knn_gpu(res, xq_c, xb_c, k, use_cuvs=use_cuvs)
self.assertTrue(torch.equal(I.cpu(), gt_I))
self.assertLess((D.cpu() - gt_D).abs().max(), 1e-4)
# test on subset
try:
# This internally uses the current pytorch stream
D, I = faiss.knn_gpu(res, xq_c[6:8], xb_c, k, use_cuvs=use_cuvs)
except TypeError:
if not xq_row_major:
# then it is expected
continue
# otherwise it is an error
raise
self.assertTrue(torch.equal(I.cpu(), gt_I[6:8]))
self.assertLess((D.cpu() - gt_D[6:8]).abs().max(), 1e-4)
@unittest.skipUnless(
"CUVS" in faiss.get_compile_options(),
"only if CUVS is compiled in")
def test_knn_gpu_cuvs(self):
self.test_knn_gpu(use_cuvs=True)
def test_knn_gpu_datatypes(self, use_cuvs=False):
torch.manual_seed(10)
d = 10
nb = 1024
nq = 5
k = 10
res = faiss.StandardGpuResources()
# make GT on torch cpu and test using IndexFlatL2
xb = torch.rand(nb, d, dtype=torch.float32)
xq = torch.rand(nq, d, dtype=torch.float32)
index = faiss.IndexFlatL2(d)
index.add(xb)
gt_D, gt_I = index.search(xq, k)
xb_c = xb.cuda().half()
xq_c = xq.cuda().half()
# use i32 output indices
D = torch.zeros(nq, k, device=xb_c.device, dtype=torch.float32)
I = torch.zeros(nq, k, device=xb_c.device, dtype=torch.int32)
faiss.knn_gpu(res, xq_c, xb_c, k, D, I, use_cuvs=use_cuvs)
self.assertTrue(torch.equal(I.long().cpu(), gt_I))
self.assertLess((D.float().cpu() - gt_D).abs().max(), 1.5e-3)
# Test using numpy
D = np.zeros((nq, k), dtype=np.float32)
I = np.zeros((nq, k), dtype=np.int32)
xb_c = xb.half().numpy()
xq_c = xq.half().numpy()
faiss.knn_gpu(res, xq_c, xb_c, k, D, I, use_cuvs=use_cuvs)
self.assertTrue(torch.equal(torch.from_numpy(I).long(), gt_I))
self.assertLess((torch.from_numpy(D) - gt_D).abs().max(), 1.5e-3)
| TestTorchUtilsKnnGpu |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 19153,
"end": 19407
} | class ____(models.Model):
related_object = models.ForeignKey(OrderingFilterModel, related_name="related", on_delete=models.CASCADE)
index = models.SmallIntegerField(help_text="A non-related field to test with", default=0)
| OrderingFilterRelatedModel |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 7021,
"end": 10449
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: HunYuanMoEV1Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.query_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.key_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
query_states = self.query_layernorm(query_states)
key_states = self.key_layernorm(key_states)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| HunYuanMoEV1Attention |
python | openai__openai-python | src/openai/resources/embeddings.py | {
"start": 11902,
"end": 12131
} | class ____:
def __init__(self, embeddings: Embeddings) -> None:
self._embeddings = embeddings
self.create = to_streamed_response_wrapper(
embeddings.create,
)
| EmbeddingsWithStreamingResponse |
python | wandb__wandb | tests/system_tests/test_automations/test_automations_api.py | {
"start": 23977,
"end": 27187
} | class ____:
@fixture(scope="class")
def num_projects(self) -> int:
return 10
@fixture(scope="class", params=[1, 2, 3])
def page_size(self, request: FixtureRequest) -> int:
return request.param
@fixture(scope="class")
def num_pages(self, num_projects: int, page_size: int) -> int:
"""The number of pages we'll expect to encounter via paginated requests."""
# NOTE: For now, pagination is per project, NOT per automation
return math.ceil(num_projects / page_size)
@fixture(scope="class")
def setup_paginated_automations(
self,
user: str,
api: wandb.Api,
webhook: WebhookIntegration,
num_projects: int,
make_name: Callable[[str], str],
):
# HACK: Is there a way to ensure a clean slate for each test?
for id_ in api.automations():
api.delete_automation(id_)
# NOTE: For now, pagination is per project, NOT per automation, so
# to test pagination, we'll create each automation in a separate project.
#
# UPDATE THIS in the future if we switch to per-automation pagination.
project_names = [make_name(f"project-{i}") for i in range(num_projects)]
automation_names = [make_name(f"automation-{i}") for i in range(num_projects)]
created_automation_ids = deque()
for project_name, automation_name in zip(project_names, automation_names):
# Create the placeholder project for the automation
api.create_project(name=project_name, entity=user)
project = api.project(name=project_name, entity=user)
# Create the actual automation
event = OnLinkArtifact(scope=project)
action = SendWebhook.from_integration(webhook)
created = api.create_automation(
event >> action, name=automation_name, description="test description"
)
# Refetch (to avoid the off-by-1 index issue on older servers) and retain for later cleanup
refetched_id = api.automation(name=created.name).id
created_automation_ids.append(refetched_id)
yield
# This particular fixture is deliberately class-scoped, but clean up the automations for good measure
for id_ in created_automation_ids:
api.delete_automation(id_)
@mark.usefixtures(setup_paginated_automations.__name__)
def test_paginated_automations(
self,
mocker,
user,
api: wandb.Api,
num_projects,
page_size,
):
# Spy on the client method that makes the GQL request. Not ideal, but it may have to do for now
client_spy = mocker.spy(api.client, "execute")
# Fetch the automations
list(api.automations(entity=user, per_page=page_size))
# Check that the number of GQL requests is at least what we expect from the pagination params
# Note that a (cached) introspection query may add an extra request the first time this is
# called.
expected_page_count = math.ceil(num_projects / page_size)
assert client_spy.call_count >= expected_page_count
| TestPaginatedAutomations |
python | dask__dask | dask/tests/test_expr.py | {
"start": 2511,
"end": 2571
} | class ____:
def __init__(self, *args, **kwargs): ...
| Mixin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 330,
"end": 375
} | class ____(
#
object
#
):
...
| A |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 52888,
"end": 53989
} | class ____(Request):
"""
Clear an open Scroll ID
:param scroll_id: Scroll ID as returned by previous events service calls
:type scroll_id: str
"""
_service = "events"
_action = "clear_scroll"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"scroll_id": {
"description": "Scroll ID as returned by previous events service calls",
"type": "string",
}
},
"required": ["scroll_id"],
"type": "object",
}
def __init__(self, scroll_id: str, **kwargs: Any) -> None:
super(ClearScrollRequest, self).__init__(**kwargs)
self.scroll_id = scroll_id
@schema_property("scroll_id")
def scroll_id(self) -> str:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: str) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| ClearScrollRequest |
python | pytorch__pytorch | test/test_sparse.py | {
"start": 198194,
"end": 210588
} | class ____(TestCase):
exact_dtype = True
def _test_meta_sparse_coo(self, dtype):
r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta', dtype=dtype)
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r, r2)
r3 = torch.sparse_coo_tensor(size=(4, 4), device='meta', dtype=dtype)
self.assertTrue(r3.is_meta)
self.assertEqual(r, r3)
r.sparse_resize_((4, 4), 1, 1)
r.sparse_resize_and_clear_((4, 4, 4), 2, 1)
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), 1)
self.assertEqual(r._dimV(), 1)
self.assertEqual(r._nnz(), 0)
# nnz zero sparse tensors should always be coalesced at creation
self.assertEqual(r.is_coalesced(), True)
# but we can force them into the uncoalesed state
r._coalesced_(False)
self.assertEqual(r.is_coalesced(), False)
# return the coalesced state for indices/values access
r._coalesced_(True)
# TODO: this sort of aliasing will need to be handled by
# functionalization
self.assertEqual(r._indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r._values(), torch.empty(0, 4, device='meta', dtype=dtype))
self.assertEqual(r.indices(), torch.empty(2, 0, device='meta', dtype=torch.int64))
self.assertEqual(r.values(), torch.empty(0, 4, device='meta', dtype=dtype))
def _test_meta_sparse_compressed(self, dtype, layout, batchsize, densesize):
index_dtype = torch.int64
blocksize = (2, 3) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
sparsesize = (4, 6)
nnz = 0
shape = (*batchsize, *sparsesize, *densesize)
compressed_dim = 0 if layout in {torch.sparse_csr, torch.sparse_bsr} else 1
nof_compressed_indices = (sparsesize[compressed_dim] // blocksize[compressed_dim] + 1 if blocksize
else sparsesize[compressed_dim] + 1)
compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype)
plain_indices = torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype)
values = torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype)
r = torch.sparse_compressed_tensor(
compressed_indices,
plain_indices,
values,
shape,
layout=layout
)
self.assertTrue(r.is_meta)
self.assertEqual(r.device.type, "meta")
self.assertEqual(r.sparse_dim(), 2)
self.assertEqual(r.dense_dim(), len(densesize))
self.assertEqual(r._nnz(), nnz)
batch_dims = r.ndim - r.sparse_dim() - r.dense_dim()
r_blocksize = r.values().shape[batch_dims + 1: batch_dims + 1 + len(blocksize)]
self.assertEqual(r_blocksize, blocksize)
r_compressed_indices = r.crow_indices() if layout in {torch.sparse_csr, torch.sparse_bsr} else r.ccol_indices()
r_plain_indices = r.col_indices() if layout in {torch.sparse_csr, torch.sparse_bsr} else r.row_indices()
self.assertEqual(r_compressed_indices,
torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype))
self.assertEqual(r_plain_indices, torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype))
self.assertEqual(r.values(), torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype))
r2 = torch.empty_like(r)
self.assertTrue(r2.is_meta)
self.assertEqual(r2, r)
if layout in {torch.sparse_csr, torch.sparse_csc}:
r3 = torch.empty((*batchsize, *sparsesize), dtype=dtype, layout=layout, device="meta")
self.assertTrue(r3.is_meta)
if not densesize:
# dense dimensions cannot be specified for torch.empty
self.assertEqual(r3, r)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_meta(self, dtype, layout):
if layout is torch.sparse_coo:
self._test_meta_sparse_coo(dtype)
else:
for batchsize, densesize in itertools.product([(), (2,)], [(), (3,)]):
self._test_meta_sparse_compressed(dtype, layout, batchsize, densesize)
def _test_print_meta_data(self, dtype, layout, batchsize, sparsesize, densesize):
index_dtype = torch.int64
nnz = 0
blocksize = (2, 3) if layout in {torch.sparse_bsr, torch.sparse_bsc} else ()
shape = (*batchsize, *sparsesize, *densesize)
values = torch.empty((*batchsize, nnz, *blocksize, *densesize), device='meta', dtype=dtype)
if layout is torch.sparse_coo:
indices = torch.empty((len(sparsesize), nnz), device='meta', dtype=index_dtype)
x = torch.sparse_coo_tensor(indices, values, shape)
else:
compressed_dim = 0 if layout in {torch.sparse_csr, torch.sparse_bsr} else 1
nof_compressed_indices = (sparsesize[compressed_dim] // blocksize[compressed_dim] + 1 if blocksize
else sparsesize[compressed_dim] + 1)
compressed_indices = torch.empty((*batchsize, nof_compressed_indices), device='meta', dtype=index_dtype)
plain_indices = torch.empty((*batchsize, nnz), device='meta', dtype=index_dtype)
x = torch.sparse_compressed_tensor(
compressed_indices,
plain_indices,
values,
shape,
layout=layout
)
printed = []
printed.append(f"########## {dtype}/{index_dtype}/size={batchsize}+{sparsesize}+{blocksize}+{densesize} ##########")
printed.append("# sparse meta tensor")
printed.append(str(x))
return printed
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_print_meta(self, dtype, layout):
printed = []
for batchsize, sparsesize, densesize in itertools.product(
[(), (2,)], [(4, 6), (3, 5, 7)], [(), (3,)]
):
if layout is torch.sparse_coo and batchsize:
# COO tensors don't have batch dimensions
continue
if layout is not torch.sparse_coo and len(sparsesize) != 2:
# CSR/CSC/BSR/BSC tensors must have 2 sparse dimensions
continue
printed += self._test_print_meta_data(dtype, layout, batchsize, sparsesize, densesize)
orig_maxDiff = self.maxDiff
self.maxDiff = None
try:
self.assertExpected('\n'.join(printed))
self.maxDiff = orig_maxDiff
except Exception:
self.maxDiff = orig_maxDiff
raise
def assertEqualMeta(self, x, y, expected_nnz):
self.assertEqual(x.layout, y.layout)
self.assertEqual(x.shape, y.shape)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
def assertEqualAttrs(x, y, expected_shape):
self.assertEqual(x.shape, expected_shape)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.layout, y.layout)
if not x.is_meta:
self.assertEqual(x.device, y.device)
if x.layout is torch.sparse_coo:
assertEqualAttrs(x._indices(), y._indices(), (*y._indices().shape[:-1], expected_nnz))
assertEqualAttrs(x._values(), y._values(), (expected_nnz, *y._values().shape[1:]))
elif x.layout in {torch.sparse_csr, torch.sparse_bsr}:
assertEqualAttrs(x.crow_indices(), y.crow_indices(), y.crow_indices().shape)
assertEqualAttrs(x.col_indices(), y.col_indices(), (*y.col_indices().shape[:-1], expected_nnz))
batch_dim = x.col_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
elif x.layout in {torch.sparse_csc, torch.sparse_bsc}:
assertEqualAttrs(x.ccol_indices(), y.ccol_indices(), y.ccol_indices().shape)
assertEqualAttrs(x.row_indices(), y.row_indices(), (*y.row_indices().shape[:-1], expected_nnz))
batch_dim = x.row_indices().ndim - 1
values_shape = (*y.values().shape[:batch_dim], expected_nnz, *y.values().shape[batch_dim + 1:])
self.assertEqual(x.values().layout, y.values().layout)
self.assertEqual(x.values().dtype, y.values().dtype)
self.assertEqual(x.values().shape, values_shape)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_to_meta(self, dtype, layout):
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device="meta")
self.assertEqual(m.device.type, "meta")
self.assertEqualMeta(m, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_zeros_like_meta(self, dtype, layout):
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = torch.zeros_like(t, device="meta")
self.assertEqual(m.device.type, "meta")
self.assertEqualMeta(m, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_fake(self, dtype, layout):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
fake_mode = FakeTensorMode()
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
f = FakeTensor.from_tensor(t, fake_mode)
self.assertIsInstance(f, FakeTensor)
self.assertEqualMeta(f, t, 0)
d = f.detach()
self.assertIsInstance(d, FakeTensor)
self.assertEqualMeta(d, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_zeros_like_fake(self, dtype, layout):
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._mode_utils import no_dispatch
fake_mode = FakeTensorMode()
index_dtype = torch.int64
device = 'cpu'
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
f = FakeTensor.from_tensor(t, fake_mode)
expected = torch.zeros_like(t)
with no_dispatch():
result = torch.zeros_like(f, device=f.fake_device)
self.assertEqual(result, expected)
self.assertEqualMeta(result, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_sum_meta(self, dtype, layout):
device = 'cpu'
index_dtype = torch.int64
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device='meta')
r = torch.sum(m)
expected = torch.sum(t).to(device="meta")
self.assertTrue(r.is_meta)
self.assertEqualMeta(r, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
def test_add_meta(self, dtype, layout):
device = 'cpu'
index_dtype = torch.int64
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
expected = torch.add(t, t).to(device='meta')
m = t.to(device='meta')
r = torch.add(m, m)
self.assertEqualMeta(r, expected, 0)
| TestSparseMeta |
python | gevent__gevent | src/greentest/3.9/test_subprocess.py | {
"start": 136818,
"end": 147182
} | class ____(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We uses hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USERSHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
dwFlags=STARTF_USERSHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbot raises EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
| Win32ProcessTestCase |
python | doocs__leetcode | solution/2900-2999/2917.Find the K-or of an Array/Solution.py | {
"start": 0,
"end": 237
} | class ____:
def findKOr(self, nums: List[int], k: int) -> int:
ans = 0
for i in range(32):
cnt = sum(x >> i & 1 for x in nums)
if cnt >= k:
ans |= 1 << i
return ans
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 22323,
"end": 23396
} | class ____(Request):
"""
Archive models
:param ids: Entities to move
:type ids: Sequence[str]
"""
_service = "models"
_action = "archive_many"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Entities to move",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids: List[str], **kwargs: Any) -> None:
super(ArchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| ArchiveManyRequest |
python | bokeh__bokeh | src/bokeh/models/formatters.py | {
"start": 28853,
"end": 41813
} | class ____(TickFormatter):
''' A ``TickFormatter`` for displaying timedelta values nicely across a
range of scales. The largest scale for differentiating between formats
is "days", as the conversion from "days" to "months" or "years" is not
well defined.
``TimedeltaTickFormatter`` has the following properties (listed together
with their default values) that can be used to control the formatting
of axis ticks at different scales:
{defaults}
Each scale property can be set to format or list of formats to use for
formatting timedelta tick values that fall in that "time scale".
By default, only the first format string passed for each time scale
will be used. By default, leading zeros are stripped away from
the formatted labels for the time scales ``nanoseconds``, ``microseconds``
and ``milliseconds``.
This list of supported formats is reproduced below. In general formats
with an uppercase letter refer to the time passed since the next last
larger time format (e.g. minutes since the last hour). On the other hand
formats with a lowercase letter corresponds to the overall completed time
passed (3.6 days becomes 3 days).
+--------+-----------------------------------------------------------------+
| %NS | Nanoseconds since last microsecond as a decimal number, |
| | zero-padded on the left (range 000 to 999). |
| | Warning: Due to floating point precision, ticks may be formatted|
| | incorrectly if the overall timedelta is rather large (>10days). |
+--------+-----------------------------------------------------------------+
| %ns | Overall completed nanoseconds. |
| | Warning: Due to floating point precision, ticks may be formatted|
| | incorrectly if the overall timedelta is rather large (>10days). |
+--------+-----------------------------------------------------------------+
| %US | Microseconds since last millisecond as a decimal number, |
| | zero-padded on the left (range 000 to 999). |
+--------+-----------------------------------------------------------------+
| %us | Overall completed microseconds. |
+--------+-----------------------------------------------------------------+
| %MS | Milliseconds since last second as a decimal number, |
| | zero-padded on the left (range 000 to 999). |
+--------+-----------------------------------------------------------------+
| %ms | Overall completed milliseconds. |
+--------+-----------------------------------------------------------------+
| %S | Seconds since last minute as a decimal number, zero-padded on |
| | the left (range 00 to 59). |
+--------+-----------------------------------------------------------------+
| %s | Overall completed seconds. |
+--------+-----------------------------------------------------------------+
| %M | Minutes since last hour as a decimal number, zero-padded on |
| | the left (range 00 to 59). |
+--------+-----------------------------------------------------------------+
| %m | Overall completed minutes. |
+--------+-----------------------------------------------------------------+
| %H | Hours since last day as a decimal number, zero-padded on the |
| | left (range 00 to 23). |
+--------+-----------------------------------------------------------------+
| %h | Overall completed hours. |
+--------+-----------------------------------------------------------------+
| %d | Overall completed days. |
+--------+-----------------------------------------------------------------+
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
nanoseconds = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``nanoseconds``"),
default="%NSns")
microseconds = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``microseconds``"),
default="%USus")
milliseconds = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``milliseconds``"),
default="%MSms")
seconds = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``seconds``"),
default="%H:%M:%S")
minsec = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``minsec`` (for combined minutes and seconds)"),
default="%H:%M:%S")
minutes = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``minutes``"),
default="%H:%M")
hourmin = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``hourmin`` (for combined hours and minutes)"),
default="%H:%M")
hours = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``hours``"),
default="%H:%M")
days = String(help=_TIMEDELTA_TICK_FORMATTER_HELP("``days``"),
default="%d days")
strip_leading_zeros = Either(Bool, Seq(Enum(TimedeltaResolutionType)), default=False, help="""
Whether to strip any leading zeros in the formatted ticks.
Valid values are:
* ``True`` or ``False`` (default) to set stripping across all resolutions.
* A sequence of resolution types, e.g. ``["microseconds", "milliseconds"]``, to enable
scale-dependent stripping of leading zeros.
""")
hide_repeats = Bool(default=False, help="""
Whether repeated formatted tick values will be suppressed.
For example, an initial set of ticks ``["06:07", "06:07", "06:07", "06:08",
"06:08"]`` will become ``["06:07", "", "", "06:08", ""]``. Only the base
label, without any additional context, is considered when determining
repeats. If the context itself is a ``TimedeltaTickFormatter``, then this
property may also be set for the context separately, if desired.
""")
context = Nullable(Either(String, Instance("bokeh.models.formatters.TimedeltaTickFormatter")), default=None, help="""
A format for adding context to the tick or ticks specified by ``context_which``.
Valid values are:
* None, no context is added
* A standard :class:`~bokeh.models.TimedeltaTickFormatter` format string, the single format is
used across all scales
* Another :class:`~bokeh.models.TimedeltaTickFormatter` instance, to have scale-dependent
context
""")
context_which = Enum(ContextWhich, default="start", help="""
Which tick or ticks to add a formatted context string to. Valid values are:
`"start"`, `"end"`, `"center"`, and `"all"`.
""")
context_location = Enum(Location, default="below", help="""
Relative to the tick label text baseline, where the context should be
rendered. Valid values are: `"below"`, `"above"`, `"left"`, and `"right"`.
""")
def RELATIVE_DATETIME_CONTEXT() -> DatetimeTickFormatter:
return DatetimeTickFormatter(
microseconds="%T",
milliseconds="%T",
seconds="%H:%M",
minsec="%Hh",
minutes="%Hh",
hourmin="%F",
hours="%F",
days="%Y",
months="",
years="",
)
def CONTEXTUAL_DATETIME_FORMATTER() -> DatetimeTickFormatter:
return DatetimeTickFormatter(
microseconds="%fus",
milliseconds="%3Nms",
seconds="%T",
minsec="%T",
minutes="%H:%M",
hourmin="%H:%M",
hours="%H:%M",
days="%b %d",
months="%b %Y",
years="%Y",
strip_leading_zeros=["microseconds", "milliseconds", "seconds"],
boundary_scaling=False,
context_which="all",
context=DatetimeTickFormatter(
microseconds="%T",
milliseconds="%T",
seconds="%b %d, %Y",
minsec="%b %d, %Y",
minutes="%b %d, %Y",
hourmin="%b %d, %Y",
hours="%b %d, %Y",
days="%Y",
months="",
years="",
boundary_scaling=False,
hide_repeats=True,
context_which="all",
context=DatetimeTickFormatter(
microseconds="%b %d, %Y",
milliseconds="%b %d, %Y",
seconds="",
minsec="",
minutes="",
hourmin="",
hours="",
days="",
months="",
years="",
boundary_scaling=False,
hide_repeats=True,
context=None,
),
),
)
def CONTEXTUAL_TIMEDELTA_FORMATTER() -> TimedeltaTickFormatter:
return TimedeltaTickFormatter(
nanoseconds="%NSns",
microseconds="%USus",
milliseconds="%MSms",
seconds="%H:%M:%S",
minsec="%H:%M:%S",
minutes="%H:%M",
hourmin="%H:%M",
hours="%H:%M",
days="%d days",
strip_leading_zeros=["nanoseconds", "microseconds", "milliseconds"],
context_which="all",
context=TimedeltaTickFormatter(
nanoseconds="%H:%M:%S.%MS%US",
microseconds="%H:%M:%S.%MS",
milliseconds="%H:%M:%S",
seconds="%d days",
minsec="%d days",
minutes="%d days",
hourmin="%d days",
hours="%d days",
days="",
hide_repeats=True,
context_which="all",
context=TimedeltaTickFormatter(
nanoseconds="%d days",
microseconds="%d days",
milliseconds="%d days",
seconds="",
minsec="",
minutes="",
hourmin="",
hours="",
days="",
hide_repeats=True,
context=None,
),
),
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# This is to automate documentation of DatetimeTickFormatter/TimedeltaTickFormatter formats and their defaults.
def create_format_table(fields: tuple[str, ...], primary: TickFormatter) -> str:
def extended_join(character, iterable):
return f"{character}{character.join(iterable)}{character}"
def add_row_item(obj, name, string_length):
value = getattr(obj, name) if obj else ""
return f"{value:<{string_length}}"
def create_separator_line(character):
return extended_join("+", [character*col_len for col_len in lens])
column_names = ["Scale", "Format", "1st Context", "2nd Context"]
# Get formatters for each context level
secondary = primary.context
tertiary = secondary.context
lens = [len(name) for name in column_names]
lens[0] = max(lens[0], max(map(len, fields)))
lens[1] = max(lens[1], max(map(lambda f: len(getattr(primary, f) if primary else ""), fields)))
lens[2] = max(lens[2], max(map(lambda f: len(getattr(secondary, f) if primary else ""), fields)))
lens[3] = max(lens[3], max(map(lambda f: len(getattr(tertiary, f) if primary else ""), fields)))
separator = create_separator_line("-")
rows = [
separator,
extended_join("|", [f"{value:<{n}}" for value, n in zip(column_names, lens)]),
create_separator_line("="),
]
# Build table rows
for field in fields:
scale = f"{field:<{lens[0]}}"
p_fmt = add_row_item(primary, field, lens[1])
c1_fmt = add_row_item(secondary, field, lens[2])
c2_fmt = add_row_item(tertiary, field, lens[3])
rows.append(extended_join("|", [scale, p_fmt, c1_fmt, c2_fmt]))
rows.append(separator)
indent = " "*4
return f"\n{indent}".join(rows)
DatetimeTickFormatter.__doc__ = format_docstring(DatetimeTickFormatter.__doc__, defaults=create_format_table(
('microseconds', 'milliseconds', 'seconds', 'minsec', 'minutes', 'hourmin', 'hours', 'days', 'months', 'years'),
CONTEXTUAL_DATETIME_FORMATTER(),
))
TimedeltaTickFormatter.__doc__ = format_docstring(TimedeltaTickFormatter.__doc__, defaults=create_format_table(
('nanoseconds', 'microseconds', 'milliseconds', 'seconds', 'minsec', 'minutes', 'hourmin', 'hours', 'days'),
CONTEXTUAL_TIMEDELTA_FORMATTER(),
))
| TimedeltaTickFormatter |
python | apache__airflow | providers/presto/src/airflow/providers/presto/transfers/gcs_to_presto.py | {
"start": 1448,
"end": 5047
} | class ____(BaseOperator):
"""
Loads a csv file from Google Cloud Storage into a Presto table.
Assumptions:
1. CSV file should not have headers
2. Presto table with requisite columns is already created
3. Optionally, a separate JSON file with headers or list of headers can be provided
:param source_bucket: Source GCS bucket that contains the csv
:param source_object: csv file including the path
:param presto_table: presto table to upload the data
:param presto_conn_id: destination presto connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Google Cloud Storage service.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"presto_table",
)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
presto_table: str,
presto_conn_id: str = "presto_default",
gcp_conn_id: str = "google_cloud_default",
schema_fields: Iterable[str] | None = None,
schema_object: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.presto_table = presto_table
self.presto_conn_id = presto_conn_id
self.gcp_conn_id = gcp_conn_id
self.schema_fields = schema_fields
self.schema_object = schema_object
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
presto_hook = PrestoHook(presto_conn_id=self.presto_conn_id)
with NamedTemporaryFile("w+") as temp_file:
self.log.info("Downloading data from %s", self.source_object)
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.source_object,
filename=temp_file.name,
)
data = csv.reader(temp_file)
rows = (tuple(row) for row in data)
self.log.info("Inserting data into %s", self.presto_table)
if self.schema_fields:
presto_hook.insert_rows(table=self.presto_table, rows=rows, target_fields=self.schema_fields)
elif self.schema_object:
blob = gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
presto_hook.insert_rows(table=self.presto_table, rows=rows, target_fields=schema_fields)
else:
presto_hook.insert_rows(table=self.presto_table, rows=rows)
| GCSToPrestoOperator |
python | aio-libs__aiohttp | tests/test_resolver.py | {
"start": 2721,
"end": 3035
} | class ____:
def __init__(self, hosts: Collection[str]) -> None:
self.nodes = [
FakeAIODNSAddrInfoNode(
socket.AF_INET6,
(h.encode(), 0, 0, 3 if ip_address(h).is_link_local else 0),
)
for h in hosts
]
| FakeAIODNSAddrInfoIPv6Result |
python | run-llama__llama_index | llama-index-core/tests/voice_agents/test_subclasses.py | {
"start": 3105,
"end": 7317
} | class ____(BaseVoiceAgent):
def __init__(
self,
ws: BaseVoiceAgentWebsocket,
interface: BaseVoiceAgentInterface,
api_key: Optional[str] = None,
):
super().__init__(ws=ws, interface=interface, api_key=api_key)
self._is_started = False
self._sent: List[Any] = []
self._handled: List[dict] = []
self._is_stopped = False
async def start(self, *args, **kwargs) -> None:
self._is_started = True
async def send(self, audio: Any, *args, **kwargs) -> None:
self._sent.append(audio)
async def interrupt(self) -> None:
pass
async def handle_message(self, message: dict) -> Any:
self._handled.append(message)
async def stop(self) -> None:
self._is_stopped = True
@pytest.fixture()
def mock_interface() -> BaseVoiceAgentInterface:
return MockVoiceAgentInterface()
@pytest.fixture()
def mock_websocket() -> BaseVoiceAgentWebsocket:
return MockVoiceAgentWebsocket(
uri="wss://my.mock.websocket:8000", api_key="fake-api-key"
)
@pytest.fixture()
def mock_agent() -> BaseVoiceAgent:
return MockVoiceAgent(
ws=MockVoiceAgentWebsocket(
uri="wss://my.mock.websocket:8000", api_key="fake-api-key"
),
interface=MockVoiceAgentInterface(),
)
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
def test_interface_subclassing(mock_interface: MockVoiceAgentInterface):
mock_interface.start()
mock_interface._speaker_callback()
mock_interface._microphone_callback()
mock_interface.receive(data=b"hello world!")
mock_interface.interrupt()
mock_interface.stop()
assert mock_interface.output() == ["interface.,", True, True, 1]
assert mock_interface._received == [b"hello world!"]
@pytest.mark.asyncio
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
async def test_websocket_subclassing(mock_websocket: MockVoiceAgentWebsocket):
await mock_websocket.aconnect()
assert isinstance(mock_websocket.ws, MockConnection)
await mock_websocket.send(data="hello world")
await mock_websocket.send(data=b"this is a test")
assert mock_websocket.ws._sent == ["hello world", b"this is a test"]
await mock_websocket.close()
assert mock_websocket.ws._is_closed
@pytest.mark.asyncio
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
async def test_agent_subclassing(mock_agent: MockVoiceAgent):
await mock_agent.start()
assert mock_agent._is_started
await mock_agent.send(audio="Hello world")
assert mock_agent._sent == ["Hello world"]
await mock_agent.handle_message(message={"type": "text", "content": "content"})
assert mock_agent._handled == [{"type": "text", "content": "content"}]
mock_agent._events = [
BaseVoiceAgentEvent(type_t="send"),
BaseVoiceAgentEvent(type_t="text"),
]
mock_agent._messages = [
ChatMessage(role="user", content="Hello world"),
ChatMessage(role="assistant", content="content"),
]
def filter_events(events: List[BaseVoiceAgentEvent]):
return [event for event in events if event.type_t == "send"]
assert mock_agent.export_events() == [
BaseVoiceAgentEvent(type_t="send"),
BaseVoiceAgentEvent(type_t="text"),
]
assert mock_agent.export_events(filter=filter_events) == [
BaseVoiceAgentEvent(type_t="send")
]
assert mock_agent.export_events(limit=1) == [BaseVoiceAgentEvent(type_t="send")]
def filter_messages(messages: List[ChatMessage]):
return [message for message in messages if message.role == "assistant"]
assert mock_agent.export_messages() == [
ChatMessage(role="user", content="Hello world"),
ChatMessage(role="assistant", content="content"),
]
assert mock_agent.export_messages(limit=1) == [
ChatMessage(role="user", content="Hello world")
]
assert mock_agent.export_messages(filter=filter_messages) == [
ChatMessage(role="assistant", content="content")
]
await mock_agent.stop()
assert mock_agent._is_stopped
| MockVoiceAgent |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 1013,
"end": 1099
} | class ____(TypedDict):
success: bool
message: NotRequired[str]
| StoreDataResponse |
python | django__django | tests/custom_managers/models.py | {
"start": 5597,
"end": 5729
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_public=True)
| RestrictedManager |
python | pandas-dev__pandas | pandas/tests/indexing/test_indexing.py | {
"start": 30316,
"end": 39342
} | class ____:
def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli):
# dispatching _can_hold_element to underlying DatetimeArray
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti.copy(deep=True))
values = ser._values
newval = "2018-01-01"
values._validate_setitem_value(newval)
indexer_sli(ser)[0] = newval
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._ndarray is values._ndarray
else:
assert ser._values is values
@pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
def test_setitem_dt64_string_values(self, tz_naive_fixture, indexer_sli, key, box):
# dispatching _can_hold_element to underling DatetimeArray
tz = tz_naive_fixture
if isinstance(key, slice) and indexer_sli is tm.loc:
key = slice(0, 1)
dti = date_range("2016-01-01", periods=3, tz=tz)
ser = Series(dti.copy(deep=True))
values = ser._values
newvals = box(["2019-01-01", "2010-01-02"])
values._validate_setitem_value(newvals)
indexer_sli(ser)[key] = newvals
if tz is None:
# TODO(EA2D): we can make this no-copy in tz-naive case too
assert ser.dtype == dti.dtype
assert ser._values._ndarray is values._ndarray
else:
assert ser._values is values
@pytest.mark.parametrize("scalar", ["3 Days", offsets.Hour(4)])
def test_setitem_td64_scalar(self, indexer_sli, scalar):
# dispatching _can_hold_element to underling TimedeltaArray
tdi = timedelta_range("1 Day", periods=3)
ser = Series(tdi.copy(deep=True))
values = ser._values
values._validate_setitem_value(scalar)
indexer_sli(ser)[0] = scalar
assert ser._values._ndarray is values._ndarray
@pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
@pytest.mark.parametrize(
"key", [[0, 1], slice(0, 2), np.array([True, True, False])]
)
def test_setitem_td64_string_values(self, indexer_sli, key, box):
# dispatching _can_hold_element to underling TimedeltaArray
if isinstance(key, slice) and indexer_sli is tm.loc:
key = slice(0, 1)
tdi = timedelta_range("1 Day", periods=3)
ser = Series(tdi.copy(deep=True))
values = ser._values
newvals = box(["10 Days", "44 hours"])
values._validate_setitem_value(newvals)
indexer_sli(ser)[key] = newvals
assert ser._values._ndarray is values._ndarray
def test_extension_array_cross_section():
# A cross-section of a homogeneous EA should be an EA
df = DataFrame(
{
"A": pd.array([1, 2], dtype="Int64"),
"B": pd.array([3, 4], dtype="Int64"),
},
index=["a", "b"],
)
expected = Series(pd.array([1, 3], dtype="Int64"), index=["A", "B"], name="a")
result = df.loc["a"]
tm.assert_series_equal(result, expected)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
def test_extension_array_cross_section_converts():
# all numeric columns -> numeric series
df = DataFrame(
{
"A": pd.array([1, 2], dtype="Int64"),
"B": np.array([1, 2], dtype="int64"),
},
index=["a", "b"],
)
result = df.loc["a"]
expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
tm.assert_series_equal(result, expected)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
# mixed columns -> object series
df = DataFrame(
{"A": pd.array([1, 2], dtype="Int64"), "B": np.array(["a", "b"])},
index=["a", "b"],
)
result = df.loc["a"]
expected = Series([1, "a"], dtype=object, index=["A", "B"], name="a")
tm.assert_series_equal(result, expected)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, keys",
[(Series([10]), (0, 0)), (Series([1, 2, 3], index=list("abc")), (0, 1))],
)
def test_ser_tup_indexer_exceeds_dimensions(ser, keys, indexer_li):
# GH#13831
exp_err, exp_msg = IndexingError, "Too many indexers"
with pytest.raises(exp_err, match=exp_msg):
indexer_li(ser)[keys]
if indexer_li == tm.iloc:
# For iloc.__setitem__ we let numpy handle the error reporting.
exp_err, exp_msg = IndexError, "too many indices for array"
with pytest.raises(exp_err, match=exp_msg):
indexer_li(ser)[keys] = 0
def test_ser_list_indexer_exceeds_dimensions(indexer_li):
# GH#13831
# Make sure an exception is raised when a tuple exceeds the dimension of the series,
# but not list when a list is used.
ser = Series([10])
res = indexer_li(ser)[[0, 0]]
exp = Series([10, 10], index=Index([0, 0]))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])]
)
def test_scalar_setitem_with_nested_value(value):
# For numeric data, we try to unpack and thus raise for mismatching length
df = DataFrame({"A": [1, 2, 3]})
msg = "|".join(
[
"Must have equal len keys and value",
"setting an array element with a sequence",
]
)
with pytest.raises(ValueError, match=msg):
df.loc[0, "B"] = value
# TODO For object dtype this happens as well, but should we rather preserve
# the nested data and set as such?
df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)})
with pytest.raises(ValueError, match="Must have equal len keys and value"):
df.loc[0, "B"] = value
# if isinstance(value, np.ndarray):
# assert (df.loc[0, "B"] == value).all()
# else:
# assert df.loc[0, "B"] == value
@pytest.mark.parametrize(
"value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])]
)
def test_scalar_setitem_series_with_nested_value(value, indexer_sli):
# For numeric data, we try to unpack and thus raise for mismatching length
ser = Series([1, 2, 3])
with pytest.raises(ValueError, match="setting an array element with a sequence"):
indexer_sli(ser)[0] = value
# but for object dtype we preserve the nested data and set as such
ser = Series([1, "a", "b"], dtype=object)
indexer_sli(ser)[0] = value
if isinstance(value, np.ndarray):
assert (ser.loc[0] == value).all()
else:
assert ser.loc[0] == value
@pytest.mark.parametrize(
"value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])]
)
def test_scalar_setitem_with_nested_value_length1(value):
# https://github.com/pandas-dev/pandas/issues/46268
# For numeric data, assigning length-1 array to scalar position gets unpacked
df = DataFrame({"A": [1, 2, 3]})
df.loc[0, "B"] = value
expected = DataFrame({"A": [1, 2, 3], "B": [0.0, np.nan, np.nan]})
tm.assert_frame_equal(df, expected)
# but for object dtype we preserve the nested data
df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)})
df.loc[0, "B"] = value
if isinstance(value, np.ndarray):
assert (df.loc[0, "B"] == value).all()
else:
assert df.loc[0, "B"] == value
@pytest.mark.parametrize(
"value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])]
)
def test_scalar_setitem_series_with_nested_value_length1(value, indexer_sli):
# For numeric data, assigning length-1 array to scalar position gets unpacked
# TODO this only happens in case of ndarray, should we make this consistent
# for all list-likes? (as happens for DataFrame.(i)loc, see test above)
ser = Series([1.0, 2.0, 3.0])
if isinstance(value, np.ndarray):
indexer_sli(ser)[0] = value
expected = Series([0.0, 2.0, 3.0])
tm.assert_series_equal(ser, expected)
else:
with pytest.raises(
ValueError, match="setting an array element with a sequence"
):
indexer_sli(ser)[0] = value
# but for object dtype we preserve the nested data
ser = Series([1, "a", "b"], dtype=object)
indexer_sli(ser)[0] = value
if isinstance(value, np.ndarray):
assert (ser.loc[0] == value).all()
else:
assert ser.loc[0] == value
def test_object_dtype_series_set_series_element():
# GH 48933
s1 = Series(dtype="O", index=["a", "b"])
s1["a"] = Series()
s1.loc["b"] = Series()
tm.assert_series_equal(s1.loc["a"], Series())
tm.assert_series_equal(s1.loc["b"], Series())
s2 = Series(dtype="O", index=["a", "b"])
s2.iloc[1] = Series()
tm.assert_series_equal(s2.iloc[1], Series())
| TestDatetimelikeCoercion |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B021.py | {
"start": 212,
"end": 335
} | class ____:
f"""hello {VARIABLE}!"""
def foo1():
"""hello world!"""
def foo2():
f"""hello {VARIABLE}!"""
| bar2 |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image01.py | {
"start": 339,
"end": 2432
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image01.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
worksheet.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
workbook.close()
self.assertExcelEqual()
def test_create_file_from_bytesio(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.jpg", "rb") as image_file:
image_data = BytesIO(image_file.read())
worksheet.set_header(
"&L&G", {"image_left": "red.jpg", "image_data_left": image_data}
)
workbook.close()
self.assertExcelEqual()
def test_create_file_from_bytesio_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.jpg", "rb") as image_file:
image_data = BytesIO(image_file.read())
worksheet.set_header(
"&L&G", {"image_left": "red.jpg", "image_data_left": image_data}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | cython__cython | Cython/Compiler/Tests/TestUtilityLoad.py | {
"start": 3121,
"end": 3923
} | class ____(unittest.TestCase):
def test_equality(self):
c1 = Code.UtilityCode.load("NumpyImportUFunc", "NumpyImportArray.c")
c2 = Code.UtilityCode.load("NumpyImportArray", "NumpyImportArray.c")
c3 = Code.UtilityCode.load("pyunicode_strlen", "StringTools.c")
c4 = Code.UtilityCode.load("pyunicode_from_unicode", "StringTools.c")
c5 = Code.UtilityCode.load("IncludeStringH", "StringTools.c")
c6 = Code.UtilityCode.load("IncludeCppStringH", "StringTools.c")
codes = [c1, c2, c3, c4, c5, c6]
for m in range(len(codes)):
for n in range(len(codes)):
if n == m:
self.assertEqual(codes[m], codes[n])
else:
self.assertNotEqual(codes[m], codes[n])
| TestUtilityCode |
python | numba__numba | numba/cuda/tests/doc_examples/test_vecadd.py | {
"start": 201,
"end": 2043
} | class ____(CUDATestCase):
"""
Test simple vector addition
"""
def setUp(self):
# Prevent output from this test showing
# up when running the test suite
self._captured_stdout = captured_stdout()
self._captured_stdout.__enter__()
super().setUp()
def tearDown(self):
# No exception type, value, or traceback
self._captured_stdout.__exit__(None, None, None)
super().tearDown()
def test_ex_vecadd(self):
# ex_vecadd.import.begin
import numpy as np
from numba import cuda
# ex_vecadd.import.end
# ex_vecadd.kernel.begin
@cuda.jit
def f(a, b, c):
# like threadIdx.x + (blockIdx.x * blockDim.x)
tid = cuda.grid(1)
size = len(c)
if tid < size:
c[tid] = a[tid] + b[tid]
# ex_vecadd.kernel.end
# Seed RNG for test repeatability
np.random.seed(1)
# ex_vecadd.allocate.begin
N = 100000
a = cuda.to_device(np.random.random(N))
b = cuda.to_device(np.random.random(N))
c = cuda.device_array_like(a)
# ex_vecadd.allocate.end
# ex_vecadd.forall.begin
f.forall(len(a))(a, b, c)
print(c.copy_to_host())
# ex_vecadd.forall.end
# ex_vecadd.launch.begin
# Enough threads per block for several warps per block
nthreads = 256
# Enough blocks to cover the entire vector depending on its length
nblocks = (len(a) // nthreads) + 1
f[nblocks, nthreads](a, b, c)
print(c.copy_to_host())
# ex_vecadd.launch.end
np.testing.assert_equal(
c.copy_to_host(),
a.copy_to_host() + b.copy_to_host()
)
if __name__ == "__main__":
unittest.main()
| TestVecAdd |
python | realpython__materials | asterioids-pygame-project/source_code_step_2/space_rocks/game.py | {
"start": 16,
"end": 760
} | class ____:
def __init__(self):
self._init_pygame()
self.screen = pygame.display.set_mode((800, 600))
def main_loop(self):
while True:
self._handle_input()
self._process_game_logic()
self._draw()
def _init_pygame(self):
pygame.init()
pygame.display.set_caption("Space Rocks")
def _handle_input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT or (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
):
quit()
def _process_game_logic(self):
pass
def _draw(self):
self.screen.fill((0, 0, 255))
pygame.display.flip()
| SpaceRocks |
python | walkccc__LeetCode | solutions/792. Number of Matching Subsequences/792.py | {
"start": 0,
"end": 715
} | class ____:
def numMatchingSubseq(self, s: str, words: list[str]) -> int:
root = {}
def insert(word: str) -> None:
node = root
for c in word:
if c not in node:
node[c] = {'count': 0}
node = node[c]
node['count'] += 1
for word in words:
insert(word)
def dfs(s: str, i: int, node: dict) -> int:
ans = node['count'] if 'count' in node else 0
if i >= len(s):
return ans
for c in string.ascii_lowercase:
if c in node:
try:
index = s.index(c, i)
ans += dfs(s, index + 1, node[c])
except ValueError:
continue
return ans
return dfs(s, 0, root)
| Solution |
python | openai__openai-python | src/openai/cli/_api/image.py | {
"start": 2691,
"end": 2883
} | class ____(BaseModel):
image: str
num_images: int
size: str
response_format: str
prompt: str
mask: Omittable[str] = omit
model: Omittable[str] = omit
| CLIImageEditArgs |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/hybrid/hybrid_five.py | {
"start": 322,
"end": 853
} | class ____(Base):
__tablename__ = "my_model"
id: Mapped[int] = mapped_column(primary_key=True)
int_col: Mapped[int | None] = mapped_column()
@hybrid_property
def some_col(self) -> int:
return (self.int_col or 0) + 1
@some_col.inplace.setter
def _str_col_setter(self, value: int | SQLCoreOperations[int]) -> None:
self.int_col = value - 1
m = MyModel(id=42, int_col=1)
m.some_col = 42
m.some_col = select(func.max(MyModel.id)).scalar_subquery()
m.some_col = func.max(MyModel.id)
| MyModel |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 5474,
"end": 13829
} | class ____(TestCase):
def test_regular_fields(self):
"""
Model fields should map to their equivalent serializer fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = '__all__'
expected = dedent(r"""
TestSerializer\(\):
auto_field = IntegerField\(read_only=True\)
big_integer_field = IntegerField\(.*\)
boolean_field = BooleanField\(required=False\)
char_field = CharField\(max_length=100\)
comma_separated_integer_field = CharField\(max_length=100, validators=\[<django.core.validators.RegexValidator object>\]\)
date_field = DateField\(\)
datetime_field = DateTimeField\(\)
decimal_field = DecimalField\(decimal_places=1, max_digits=3\)
email_field = EmailField\(max_length=100\)
float_field = FloatField\(\)
integer_field = IntegerField\(.*\)
null_boolean_field = BooleanField\(allow_null=True, required=False\)
positive_integer_field = IntegerField\(.*\)
positive_small_integer_field = IntegerField\(.*\)
slug_field = SlugField\(allow_unicode=False, max_length=100\)
small_integer_field = IntegerField\(.*\)
text_field = CharField\(max_length=100, style={'base_template': 'textarea.html'}\)
file_field = FileField\(max_length=100\)
time_field = TimeField\(\)
url_field = URLField\(max_length=100\)
custom_field = ModelField\(model_field=<tests.test_model_serializer.CustomField: custom_field>\)
file_path_field = FilePathField\(path=%r\)
""" % tempfile.gettempdir())
assert re.search(expected, repr(TestSerializer())) is not None
def test_field_options(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = FieldOptionsModel
fields = '__all__'
expected = dedent(r"""
TestSerializer\(\):
id = IntegerField\(label='ID', read_only=True\)
value_limit_field = IntegerField\(max_value=10, min_value=1\)
length_limit_field = CharField\(max_length=12, min_length=3\)
blank_field = CharField\(allow_blank=True, max_length=10, required=False\)
null_field = IntegerField\(allow_null=True,.*required=False\)
default_field = IntegerField\(.*required=False\)
descriptive_field = IntegerField\(help_text='Some help text', label='A label'.*\)
choices_field = ChoiceField\(choices=(?:\[|\()\('red', 'Red'\), \('blue', 'Blue'\), \('green', 'Green'\)(?:\]|\))\)
text_choices_field = ChoiceField\(choices=(?:\[|\()\('red', 'Red'\), \('blue', 'Blue'\), \('green', 'Green'\)(?:\]|\))\)
""")
assert re.search(expected, repr(TestSerializer())) is not None
def test_nullable_boolean_field_choices(self):
class NullableBooleanChoicesModel(models.Model):
CHECKLIST_OPTIONS = (
(None, 'Unknown'),
(True, 'Yes'),
(False, 'No'),
)
field = models.BooleanField(null=True, choices=CHECKLIST_OPTIONS)
class NullableBooleanChoicesSerializer(serializers.ModelSerializer):
class Meta:
model = NullableBooleanChoicesModel
fields = ['field']
serializer = NullableBooleanChoicesSerializer(data=dict(
field=None,
))
self.assertTrue(serializer.is_valid())
self.assertEqual(serializer.errors, {})
def test_method_field(self):
"""
Properties and methods on the model should be allowed as `Meta.fields`
values, and should map to `ReadOnlyField`.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'method')
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
method = ReadOnlyField()
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_fields(self):
"""
Both `pk` and the actual primary key name are valid in `Meta.fields`.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('pk', 'auto_field')
expected = dedent("""
TestSerializer():
pk = IntegerField(label='Auto field', read_only=True)
auto_field = IntegerField(read_only=True)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_extra_field_kwargs(self):
"""
Ensure `extra_kwargs` are passed to generated fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'char_field')
extra_kwargs = {'char_field': {'default': 'extra'}}
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=True)
char_field = CharField(default='extra', max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_extra_field_kwargs_required(self):
"""
Ensure `extra_kwargs` are passed to generated fields.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'char_field')
extra_kwargs = {'auto_field': {'required': False, 'read_only': False}}
expected = dedent("""
TestSerializer():
auto_field = IntegerField(read_only=False, required=False)
char_field = CharField(max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_invalid_field(self):
"""
Field names that do not map to a model field or relationship should
raise a configuration error.
"""
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field', 'invalid')
expected = 'Field name `invalid` is not valid for model `RegularFieldsModel` ' \
'in `tests.test_model_serializer.TestSerializer`.'
with self.assertRaisesMessage(ImproperlyConfigured, expected):
TestSerializer().fields
def test_missing_field(self):
"""
Fields that have been declared on the serializer class must be included
in the `Meta.fields` if it exists.
"""
class TestSerializer(serializers.ModelSerializer):
missing = serializers.ReadOnlyField()
class Meta:
model = RegularFieldsModel
fields = ('auto_field',)
expected = (
"The field 'missing' was declared on serializer TestSerializer, "
"but has not been included in the 'fields' option."
)
with self.assertRaisesMessage(AssertionError, expected):
TestSerializer().fields
def test_missing_superclass_field(self):
"""
Fields that have been declared on a parent of the serializer class may
be excluded from the `Meta.fields` option.
"""
class TestSerializer(serializers.ModelSerializer):
missing = serializers.ReadOnlyField()
class ChildSerializer(TestSerializer):
class Meta:
model = RegularFieldsModel
fields = ('auto_field',)
ChildSerializer().fields
def test_choices_with_nonstandard_args(self):
class ExampleSerializer(serializers.ModelSerializer):
class Meta:
model = ChoicesModel
fields = '__all__'
ExampleSerializer()
| TestRegularFieldMappings |
python | numba__numba | numba/stencils/stencil.py | {
"start": 621,
"end": 2449
} | class ____(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
if a.ndim != arg.ndim:
raise ValueError("Secondary stencil array does not have same number "
" of dimensions as the first stencil input.")
argshape = arg.shape
for i in range(len(ashape)):
if ashape[i] > argshape[i]:
raise ValueError("Secondary stencil array has some dimension "
"smaller the same dimension in the first "
"stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
| StencilFuncLowerer |
python | boto__boto3 | tests/functional/docs/test_ec2.py | {
"start": 705,
"end": 2089
} | class ____(BaseDocsFunctionalTests):
def setUp(self):
super().setUp()
self.documenter = ServiceDocumenter(
'ec2',
session=Session(region_name='us-east-1'),
root_docs_path=self.root_services_path,
)
self.generated_contents = self.documenter.document_service()
self.generated_contents = self.generated_contents.decode('utf-8')
def test_delete_tags_method_is_documented(self):
self.assert_contains_lines_in_order(
[
'=========',
'Resources',
'=========',
'The available resources are:',
' ec2/instance/index',
],
self.generated_contents,
)
self.assert_contains_lines_in_order(
[
'.. py:class:: EC2.Instance',
' delete_tags',
],
self.get_nested_file_contents('ec2', 'instance', 'index'),
)
self.assert_contains_lines_in_order(
[
'delete_tags',
'.. py:method:: EC2.Instance.delete_tags(**kwargs)',
'response = instance.delete_tags(',
'DryRun=True|False,',
'Tags=[',
],
self.get_nested_file_contents('ec2', 'instance', 'delete_tags'),
)
| TestInstanceDeleteTags |
python | pyqtgraph__pyqtgraph | pyqtgraph/exporters/Matplotlib.py | {
"start": 5216,
"end": 5689
} | class ____(QtWidgets.QMainWindow):
def __init__(self):
from ..widgets import MatplotlibWidget
QtWidgets.QMainWindow.__init__(self)
self.mpl = MatplotlibWidget.MatplotlibWidget()
self.setCentralWidget(self.mpl)
self.show()
def __getattr__(self, attr):
return getattr(self.mpl, attr)
def closeEvent(self, ev):
MatplotlibExporter.windows.remove(self)
self.deleteLater()
| MatplotlibWindow |
python | miyuchina__mistletoe | mistletoe/span_token.py | {
"start": 9545,
"end": 9889
} | class ____(SpanToken):
"""
A "block" macro opening tag. ("{{macroName<optionalParams>}}<newLine>")
We want to keep it on a separate line instead of "soft" merging it with the *following* line.
"""
pattern = re.compile(r'(?<!\\)(\{\{\w+.*?(?<![\\/])\}\})\s*\n')
parse_inner = False
parse_group = 1
| XWikiBlockMacroStart |
python | mwaskom__seaborn | seaborn/_stats/aggregation.py | {
"start": 3587,
"end": 3684
} | class ____(Stat):
...
def __call__(self, data, groupby, orient, scales):
...
| Rolling |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 59428,
"end": 60884
} | class ____(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
| AtexitTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes9.py | {
"start": 793,
"end": 913
} | class ____(TypedDict):
x: NotRequired[int]
y: Required[int]
# This should generate an error for x but not y.
| TD_A2 |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-diff-private-simple-dataset/llama_index/packs/diff_private_simple_dataset/events.py | {
"start": 88,
"end": 237
} | class ____(BaseEvent):
@classmethod
def class_name(cls):
"""Class name."""
return "LLMEmptyResponseEvent"
| LLMEmptyResponseEvent |
python | python__mypy | mypy/nodes.py | {
"start": 57822,
"end": 58155
} | class ____(Statement):
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression | None
def __init__(self, expr: Expression | None) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_return_stmt(self)
| ReturnStmt |
python | dask__distributed | distributed/deploy/old_ssh.py | {
"start": 424,
"end": 10080
} | class ____:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import PasswordRequiredException, SSHException
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger("paramiko").setLevel(logging.WARN)
ssh.connect(
hostname=cmd_dict["address"],
username=cmd_dict["ssh_username"],
port=cmd_dict["ssh_port"],
key_filename=cmd_dict["ssh_private_key"],
compress=True,
timeout=30,
banner_timeout=30,
) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException, PasswordRequiredException) as e:
print(
"[ dask ssh ] : "
+ bcolors.FAIL
+ "SSH connection error when connecting to {addr}:{port} "
"to run '{cmd}'".format(
addr=cmd_dict["address"],
port=cmd_dict["ssh_port"],
cmd=cmd_dict["cmd"],
)
+ bcolors.ENDC
)
print(
bcolors.FAIL
+ " SSH reported this exception: "
+ str(e)
+ bcolors.ENDC
)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print(
"[ dask ssh ] : "
+ bcolors.FAIL
+ "SSH connection failed after 3 retries. Exiting."
+ bcolors.ENDC
)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(
" "
+ bcolors.FAIL
+ f"Retrying... (attempt {retries}/3)"
+ bcolors.ENDC
)
sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print("[ {label} ] : {cmd}".format(label=cmd_dict["label"], cmd=cmd_dict["cmd"]))
stdin, stdout, stderr = ssh.exec_command(
"$SHELL -i -c '" + cmd_dict["cmd"] + "'", get_pty=True
)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug("stdout from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : {output}".format(
label=cmd_dict["label"], output=line
)
)
line = stdout.readline()
except (TimeoutError, PipeTimeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug("stderr from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ line
+ bcolors.ENDC
)
line = stderr.readline()
except (TimeoutError, PipeTimeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ "remote process exited with exit status "
+ str(exit_status)
+ bcolors.ENDC
)
return True
# Get transport to current SSH client
transport = ssh.get_transport()
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict["input_queue"].empty():
# Kill some time so that this thread does not hog the CPU.
sleep(1.0)
# Send noise down the pipe to keep connection active
transport.send_ignore()
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time()
while time() < start + 5.0:
channel.send(b"\x03") # Ctrl-C
if communicate():
break
sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
def start_scheduler(
logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None
):
cmd = f"{remote_python or sys.executable} -m distributed.cli.dask_scheduler --port {port}"
# Optionally re-direct stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}"
cmd += f"&> {logdir}/dask_scheduler_{addr}:{port}.log"
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = f"{bcolors.BOLD}scheduler {addr}:{port}{bcolors.ENDC}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": addr,
"port": port,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
def start_worker(
logdir,
scheduler_addr,
scheduler_port,
worker_addr,
nthreads,
n_workers,
ssh_username,
ssh_port,
ssh_private_key,
nohost,
memory_limit,
worker_port,
nanny_port,
remote_python=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
):
cmd = (
"{python} -m {remote_dask_worker} "
"{scheduler_addr}:{scheduler_port} "
"--nthreads {nthreads}" + (" --nworkers {n_workers}" if n_workers != 1 else "")
)
if not nohost:
cmd += " --host {worker_addr}"
if memory_limit:
cmd += " --memory-limit {memory_limit}"
if worker_port:
cmd += " --worker-port {worker_port}"
if nanny_port:
cmd += " --nanny-port {nanny_port}"
cmd = cmd.format(
python=remote_python or sys.executable,
remote_dask_worker=remote_dask_worker,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
nthreads=nthreads,
n_workers=n_workers,
memory_limit=memory_limit,
worker_port=worker_port,
nanny_port=nanny_port,
)
if local_directory is not None:
cmd += f" --local-directory {local_directory}"
# Optionally redirect stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}&> {logdir}/dask_scheduler_{worker_addr}.log"
label = f"worker {worker_addr}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": worker_addr,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
| bcolors |
python | redis__redis-py | redis/asyncio/multidb/healthcheck.py | {
"start": 2859,
"end": 4260
} | class ____(AbstractHealthCheckPolicy):
"""
Policy that returns True if a majority of health check probes are successful.
"""
def __init__(self, health_check_probes: int, health_check_delay: float):
super().__init__(health_check_probes, health_check_delay)
async def execute(self, health_checks: List[HealthCheck], database) -> bool:
for health_check in health_checks:
if self.health_check_probes % 2 == 0:
allowed_unsuccessful_probes = self.health_check_probes / 2
else:
allowed_unsuccessful_probes = (self.health_check_probes + 1) / 2
for attempt in range(self.health_check_probes):
try:
if not await health_check.check_health(database):
allowed_unsuccessful_probes -= 1
if allowed_unsuccessful_probes <= 0:
return False
except Exception as e:
allowed_unsuccessful_probes -= 1
if allowed_unsuccessful_probes <= 0:
raise UnhealthyDatabaseException(
"Unhealthy database", database, e
)
if attempt < self.health_check_probes - 1:
await asyncio.sleep(self._health_check_delay)
return True
| HealthyMajorityPolicy |
python | scikit-learn__scikit-learn | sklearn/externals/_numpydoc/docscrape.py | {
"start": 17958,
"end": 19092
} | class ____(NumpyDocString):
def __init__(self, func, role="func", doc=None, config=None):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ""
if config is None:
config = {}
NumpyDocString.__init__(self, doc, config)
def get_func(self):
func_name = getattr(self._f, "__name__", self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, "__call__", self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ""
func, func_name = self.get_func()
roles = {"func": "function", "meth": "method"}
if self._role:
if self._role not in roles:
print(f"Warning: invalid role {self._role}")
out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n"
out += super().__str__(func_role=self._role)
return out
| FunctionDoc |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 5877,
"end": 6032
} | class ____(TestRss2Feed):
stylesheets = [
"/stylesheet1.xsl",
feedgenerator.Stylesheet("/stylesheet2.xsl"),
]
| TestFeedWithStylesheets |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-walk-in-weighted-graph.py | {
"start": 50,
"end": 1712
} | class ____(object):
def minimumCost(self, n, edges, query):
"""
:type n: int
:type edges: List[List[int]]
:type query: List[List[int]]
:rtype: List[int]
"""
class UnionFind(object): # Time: O(n * alpha(n)), Space: O(n)
def __init__(self, n):
self.set = list(range(n))
self.rank = [0]*n
self.w = [-1]*n # added
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y, w): # modified
x, y = self.find_set(x), self.find_set(y)
if x == y:
self.w[x] &= w # added
return False
if self.rank[x] > self.rank[y]: # union by rank
x, y = y, x
self.set[x] = self.set[y]
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
self.w[y] &= self.w[x]&w # added
return True
def cost(self, x): # added
return self.w[self.find_set(x)]
uf = UnionFind(n)
for u, v, w in edges:
uf.union_set(u, v, w)
result = [-1]*(len(query))
for i, (s, t) in enumerate(query):
if uf.find_set(s) != uf.find_set(t):
continue
result[i] = uf.cost(s) if s != t else 0
return result
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 39608,
"end": 40281
} | class ____(Reduction):
"""
Some reductions reduce the number of rows in your object but keep the original
dimension, e.g. a DataFrame stays a DataFrame instead of getting reduced to
a Series.
"""
@classmethod
def chunk(cls, df, **kwargs):
return cls.reduction_chunk(df, **kwargs)
@classmethod
def combine(cls, inputs: list, **kwargs):
func = cls.reduction_combine or cls.reduction_aggregate or cls.reduction_chunk
df = _concat(inputs)
return func(df, **kwargs) # type: ignore[misc]
def _divisions(self):
# TODO: We can do better in some cases
return (None, None)
| ReductionConstantDim |
python | getsentry__sentry | src/sentry/analytics/event.py | {
"start": 2314,
"end": 3619
} | class ____:
"""
Base class for custom analytics Events.
"""
# the type of the event, used for serialization and matching. Can be None for abstract base event classes
type: ClassVar[str | None]
_eventclass_initialized: ClassVar[bool] = False
def __new__(cls, *args: Any, **kwargs: Any) -> Self:
# Check if this class was decorated with @eventclass
if "_eventclass_initialized" not in cls.__dict__:
# If not decorated, check if it adds new dataclass fields compared to parent
if getattr(cls, "__annotations__", None):
logger.warning(
"Event class with new fields must use @eventclass decorator",
extra={"cls": cls},
)
return super().__new__(cls)
def serialize(self) -> dict[str, Any]:
return {k: v for k, v in asdict(self).items() if k != "type"}
@classmethod
# @deprecated("This constructor function is discouraged, as it is not type-safe.")
def from_instance(cls, instance: Any, **kwargs: Any) -> Self:
attrs: dict[str, Any] = {
f.name: kwargs.get(f.name, getattr(instance, f.name, None))
for f in fields(cls)
if f.name != "type"
}
return cls(**attrs)
@dataclass()
| Event |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_logging.py | {
"start": 2117,
"end": 7808
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"),
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudLoggingHook(gcp_conn_id=GCP_CONN_ID)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_logging.ConfigServiceV2Client")
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials")
def test_get_conn(self, mock_get_credentials, mock_client_class):
mock_credentials = mock.Mock()
mock_get_credentials.return_value = mock_credentials
hook = CloudLoggingHook(gcp_conn_id=GCP_CONN_ID)
conn = hook.get_conn()
mock_client_class.assert_called_once_with(credentials=mock_credentials, client_info=mock.ANY)
assert conn == mock_client_class.return_value
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_create_sink(self, mock_config_client):
sink = LogSink(**sink_config)
expected_request = CreateSinkRequest(
parent=f"projects/{PROJECT_ID}", sink=sink, unique_writer_identity=UNIQUE_WRITER_IDENTITY
)
self.hook.create_sink(
sink=sink,
project_id=PROJECT_ID,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
)
mock_config_client.return_value.create_sink.assert_called_once_with(request=expected_request)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_get_sink(self, mock_config_client):
expected_request = GetSinkRequest(sink_name=f"projects/{PROJECT_ID}/sinks/{SINK_NAME}")
self.hook.get_sink(sink_name=SINK_NAME, project_id=PROJECT_ID)
mock_config_client.return_value.get_sink.assert_called_once_with(request=expected_request)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_list_sinks(self, mock_config_client):
expected_request = ListSinksRequest(parent=f"projects/{PROJECT_ID}")
self.hook.list_sinks(project_id=PROJECT_ID)
mock_config_client.return_value.list_sinks.assert_called_once_with(request=expected_request)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_delete_sink(self, mock_config_client):
expected_request = DeleteSinkRequest(sink_name=f"projects/{PROJECT_ID}/sinks/{SINK_NAME}")
self.hook.delete_sink(sink_name=SINK_NAME, project_id=PROJECT_ID)
mock_config_client.return_value.delete_sink.assert_called_once_with(request=expected_request)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_update_sink_success(self, mock_config_client):
sink_config = {
"destination": f"bigquery.googleapis.com/projects/{PROJECT_ID}/datasets/your_dataset",
"bigquery_options": {"use_partitioned_tables": True},
}
update_mask = {"paths": ["destination", "bigquery_options"]}
expected_request = UpdateSinkRequest(
sink_name=f"projects/{PROJECT_ID}/sinks/{SINK_NAME}",
sink=sink_config,
update_mask=update_mask,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
)
self.hook.update_sink(
sink_name=SINK_NAME,
sink=sink_config,
update_mask=update_mask,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
project_id=PROJECT_ID,
)
mock_config_client.return_value.update_sink.assert_called_once_with(request=expected_request)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_create_sink_dict_input(self, mock_config_client):
expected_sink = LogSink(**sink_config)
expected_request = CreateSinkRequest(
parent=f"projects/{PROJECT_ID}", sink=expected_sink, unique_writer_identity=UNIQUE_WRITER_IDENTITY
)
self.hook.create_sink(
sink=sink_config, unique_writer_identity=UNIQUE_WRITER_IDENTITY, project_id=PROJECT_ID
)
mock_config_client.return_value.create_sink.assert_called_once_with(request=expected_request)
def test_update_sink_invalid_dict_format(self):
with pytest.raises(ValueError, match="Unknown field for LogSink: invalid_key"):
self.hook.update_sink(
sink_name=SINK_NAME,
sink={"invalid_key": "value"},
update_mask={"paths": ["invalid_key"]},
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
project_id=PROJECT_ID,
)
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_update_sink_failure(self, mock_config_client):
updated_sink = LogSink(name=SINK_NAME, destination="storage.googleapis.com/new-bucket")
updated_mask = {"paths": ["name", "destination"]}
mock_config_client.return_value.update_sink.side_effect = Exception("Permission denied")
with pytest.raises(Exception, match="Permission denied"):
self.hook.update_sink(
sink_name=SINK_NAME,
sink=updated_sink,
update_mask=updated_mask,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
project_id=PROJECT_ID,
)
mock_config_client.return_value.update_sink.assert_called_once()
@mock.patch(CLOUDLOGGING_HOOK_CLIENT)
def test_list_sinks_empty(self, mock_config_client):
mock_config_client.return_value.list_sinks.return_value = []
sinks = self.hook.list_sinks(project_id=PROJECT_ID)
assert sinks == []
mock_config_client.return_value.list_sinks.assert_called_once()
| TestCloudLoggingHook |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/pipeline.py | {
"start": 3260,
"end": 4171
} | class ____(Step):
context: PythonRegistryPublishContext
title = "Check if the connector is published on python registry"
async def _run(self) -> StepResult:
is_published = is_package_published(
self.context.package_metadata.name, self.context.package_metadata.version, self.context.registry_check_url
)
if is_published:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr=f"{self.context.package_metadata.name} already exists in version {self.context.package_metadata.version}.",
)
else:
return StepResult(
step=self,
status=StepStatus.SUCCESS,
stdout=f"{self.context.package_metadata.name} does not exist in version {self.context.package_metadata.version}.",
)
| CheckPythonRegistryPackageDoesNotExist |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 1887,
"end": 1975
} | class ____(CustomEvent, Event):
type: EventType = EventType.CUSTOM
| CustomWorkflowEvent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 391371,
"end": 392472
} | class ____(sgqlc.types.Interface):
"""Represents a contribution a user made on GitHub, such as opening
an issue.
"""
__schema__ = github_schema
__field_names__ = ("is_restricted", "occurred_at", "resource_path", "url", "user")
is_restricted = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isRestricted")
"""Whether this contribution is associated with a record you do not
have access to. For example, your own 'first issue' contribution
may have been made on a repository you can no longer access.
"""
occurred_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="occurredAt")
"""When this contribution was made."""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this contribution."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this contribution."""
user = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="user")
"""The user who made this contribution."""
| Contribution |
python | getsentry__sentry | src/sentry/features/handler.py | {
"start": 2204,
"end": 3529
} | class ____(FeatureHandler):
"""
Base class for feature handlers that apply to an organization
and an optional collection of `objects` (e.g. projects).
Subclasses are expected to implement `_check_for_batch` and perform a feature check
using only the organization.
It is generally better to extend BatchFeatureHandler if it is possible to do
the check with no more than the feature name, organization, and actor. If it
needs to unpack the Feature object and examine the flagged entity, extend
FeatureHandler instead.
"""
@abc.abstractmethod
def _check_for_batch(
self,
feature_name: str,
entity: Organization | User | RpcUser | AnonymousUser | None,
actor: User | RpcUser | AnonymousUser | None,
) -> bool | None:
raise NotImplementedError
def has(
self,
feature: Feature,
actor: User | RpcUser | AnonymousUser | None,
skip_entity: bool | None = False,
) -> bool | None:
return self._check_for_batch(feature.name, feature.get_subject(), actor)
def has_for_batch(self, batch: FeatureCheckBatch) -> dict[Project, bool | None]:
flag = self._check_for_batch(batch.feature_name, batch.subject, batch.actor)
return {obj: flag for obj in batch.objects}
| BatchFeatureHandler |
python | django__django | tests/queries/tests.py | {
"start": 150494,
"end": 150937
} | class ____(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn("INNER JOIN", str(qs.query))
self.assertEqual(list(qs), [])
| ReverseJoinTrimmingTest |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/comparison/eval_chain.py | {
"start": 3693,
"end": 5113
} | class ____(BaseOutputParser[dict]):
"""A parser for the output of the PairwiseStringEvalChain.
Attributes:
_type: The type of the output parser.
"""
@property
def _type(self) -> str:
"""Return the type of the output parser.
Returns:
The type of the output parser.
"""
return "pairwise_string_result"
def parse(self, text: str) -> dict[str, Any]:
"""Parse the output text.
Args:
text: The output text to parse.
Returns:
The parsed output.
Raises:
ValueError: If the verdict is invalid.
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in {"A", "B", "C"}:
msg = (
f"Invalid output: {text}. "
"Output must contain a double bracketed string\
with the verdict 'A', 'B', or 'C'."
)
raise ValueError(msg)
# C means the models are tied. Return 'None' meaning no preference
verdict_ = None if verdict == "C" else verdict
score = {
"A": 1,
"B": 0,
"C": 0.5,
}[verdict]
return {
"reasoning": text,
"value": verdict_,
"score": score,
}
| PairwiseStringResultOutputParser |
python | kamyu104__LeetCode-Solutions | Python/check-if-number-is-a-sum-of-powers-of-three.py | {
"start": 32,
"end": 274
} | class ____(object):
def checkPowersOfThree(self, n):
"""
:type n: int
:rtype: bool
"""
while n > 0:
if n%3 == 2:
return False
n //= 3
return True
| Solution |
python | kamyu104__LeetCode-Solutions | Python/earliest-finish-time-for-land-and-water-rides-i.py | {
"start": 40,
"end": 767
} | class ____(object):
def earliestFinishTime(self, landStartTime, landDuration, waterStartTime, waterDuration):
"""
:type landStartTime: List[int]
:type landDuration: List[int]
:type waterStartTime: List[int]
:type waterDuration: List[int]
:rtype: int
"""
mn_land = min(landStartTime[i]+landDuration[i] for i in xrange(len(landStartTime)))
mn_water = min(waterStartTime[i]+waterDuration[i] for i in xrange(len(waterStartTime)))
return min(min(max(landStartTime[i], mn_water)+landDuration[i] for i in xrange(len(landStartTime))),
min(max(waterStartTime[i], mn_land)+waterDuration[i] for i in xrange(len(waterStartTime))))
| Solution |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 835582,
"end": 836796
} | class ____(mupdf.FzOutput2):
def __init__(self, bio):
super().__init__()
self.bio = bio
self.use_virtual_write()
self.use_virtual_seek()
self.use_virtual_tell()
self.use_virtual_truncate()
def seek( self, ctx, offset, whence):
return self.bio.seek( offset, whence)
def tell( self, ctx):
ret = self.bio.tell()
return ret
def truncate( self, ctx):
return self.bio.truncate()
def write(self, ctx, data_raw, data_length):
data = mupdf.raw_to_python_bytes(data_raw, data_length)
return self.bio.write(data)
def compute_scissor(dev):
'''
Every scissor of a clip is a sub rectangle of the preceding clip scissor
if the clip level is larger.
'''
if dev.scissors is None:
dev.scissors = list()
num_scissors = len(dev.scissors)
if num_scissors > 0:
last_scissor = dev.scissors[num_scissors-1]
scissor = JM_rect_from_py(last_scissor)
scissor = mupdf.fz_intersect_rect(scissor, dev.pathrect)
else:
scissor = dev.pathrect
dev.scissors.append(JM_py_from_rect(scissor))
return scissor
| JM_new_output_fileptr_Output |
python | huggingface__transformers | src/transformers/models/musicgen_melody/configuration_musicgen_melody.py | {
"start": 853,
"end": 6724
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`MusicgenMelodyDecoder`]. It is used to instantiate a
Musicgen Melody decoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Musicgen Melody
[facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 2048):
Vocabulary size of the MusicgenMelodyDecoder model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`MusicgenMelodyDecoder`].
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically, set this to something large
just in case (e.g., 512 or 1024 or 2048).
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of decoder layers.
ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer block.
layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values attentions (not used by all models)
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
initializer_factor (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(hidden_size).
num_codebooks (`int`, *optional*, defaults to 4):
The number of parallel codebooks forwarded to the model.
audio_channels (`int`, *optional*, defaults to 1):
Number of audio channels used by the model (either mono or stereo). Stereo models generate a separate
audio stream for the left/right output channels. Mono models generate a single audio stream output.
pad_token_id (`int`, *optional*, defaults to 2048): The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 2048): The id of the *beginning-of-sequence* token.
eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie word embeddings with the text encoder.
"""
model_type = "musicgen_melody_decoder"
base_config_key = "decoder_config"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=2048,
max_position_embeddings=2048,
num_hidden_layers=24,
ffn_dim=4096,
num_attention_heads=16,
layerdrop=0.0,
use_cache=True,
activation_function="gelu",
hidden_size=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
initializer_factor=0.02,
scale_embedding=False,
num_codebooks=4,
audio_channels=1,
pad_token_id=2048,
bos_token_id=2048,
eos_token_id=None,
tie_word_embeddings=False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.ffn_dim = ffn_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.initializer_factor = initializer_factor
self.layerdrop = layerdrop
self.use_cache = use_cache
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.num_codebooks = num_codebooks
if audio_channels not in [1, 2]:
raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
self.audio_channels = audio_channels
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
| MusicgenMelodyDecoderConfig |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_omni_utils.py | {
"start": 2536,
"end": 3712
} | class ____(OmniComponent):
async def write_state_to_path(self, state_path):
"""Override to use mock data."""
import dagster as dg
# Create a mock workspace with the same credentials
mock_workspace = MockOmniWorkspace(**self.workspace.model_dump())
# Fetch and store mock data
mock_data = await mock_workspace.fetch_omni_state()
# Serialize and write to path
state_path.write_text(dg.serialize_value(mock_data))
def test_mock_omni_workspace() -> None:
"""Test that the mock Omni workspace returns the expected data."""
import asyncio
workspace = MockOmniWorkspace(
base_url="https://test.omniapp.co",
api_key="test_api_key",
)
workspace_data = asyncio.run(workspace.fetch_omni_state())
# Verify we have the expected content
assert len(workspace_data.documents) == 3
assert len(workspace_data._users_by_id) == 1 # noqa
# Verify specific content
doc_names = [doc.name for doc in workspace_data.documents]
assert "sales_dashboard" in doc_names
assert "revenue_report" in doc_names
assert "customer_analysis" in doc_names
| MockOmniComponent |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/test_utilities.py | {
"start": 1197,
"end": 3665
} | class ____:
@pytest.mark.parametrize(
"ready_condition,expected_value",
[({"status": "True"}, True), ({"status": "False"}, False), ({}, False)],
)
def test_is_ready(self, ready_condition, expected_value):
job = Job(
metadata={},
spec={},
status={},
name="test",
ready_condition=ready_condition,
execution_status={},
)
assert job.is_ready() == expected_value
@pytest.mark.parametrize(
"status,expected_value",
[
({}, {}),
({"conditions": []}, {}),
({"conditions": [{"type": "Dog", "val": "value"}]}, {}),
(
{"conditions": [{"type": "Ready", "val": "value"}]},
{"type": "Ready", "val": "value"},
),
(
{
"conditions": [
{"type": "Dog", "val": "value"},
{"type": "Ready", "val": "value"},
]
},
{"type": "Ready", "val": "value"},
),
],
)
def test_get_ready_condition(self, status, expected_value):
assert Job._get_ready_condition({"status": status}) == expected_value
@pytest.mark.parametrize(
"status,expected_value",
[
({}, {}),
({"latestCreatedExecution": {}}, {}),
({"latestCreatedExecution": {"some": "val"}}, {"some": "val"}),
],
)
def test_get_execution_status(self, status, expected_value):
assert Job._get_execution_status({"status": status}) == expected_value
@pytest.mark.parametrize(
"execution_status,expected_value",
[
({}, True), # Has no execution
(
{"completionTimestamp": None},
True,
), # Has an execution with no completion timestamp
(
{"completionTimestamp": "Exists"},
False,
), # Has an execution and it has a completion timestamp
],
)
def test_has_execution_in_progress(self, execution_status, expected_value):
job = Job(
metadata={},
spec={},
status={},
name="test",
ready_condition={},
execution_status=execution_status,
)
assert job.has_execution_in_progress() == expected_value
| TestJob |
python | scipy__scipy | scipy/stats/tests/test_rank.py | {
"start": 2424,
"end": 12775
} | class ____:
def desired_dtype(self, method='average', has_nans=False, *, xp):
if has_nans:
return xp.asarray(1.).dtype
return xp.asarray(1.).dtype if method=='average' else xp.asarray(1).dtype
def test_empty(self, xp):
"""stats.rankdata of empty array should return an empty array."""
a = xp.asarray([], dtype=xp.int64)
r = rankdata(a)
xp_assert_equal(r, xp.asarray([], dtype=self.desired_dtype(xp=xp)))
def test_list(self):
# test that NumPy still accepts lists
r = rankdata([])
assert_array_equal(r, np.array([]))
r = rankdata([40, 10, 30, 10, 50])
assert_equal(r, [4.0, 1.5, 3.0, 1.5, 5.0])
@pytest.mark.parametrize("shape", [(0, 1, 2)])
@pytest.mark.parametrize("axis", [None, *range(3)])
def test_empty_multidim(self, shape, axis, xp):
a = xp.empty(shape, dtype=xp.int64)
r = rankdata(a, axis=axis)
expected_shape = (0,) if axis is None else shape
xp_assert_equal(r, xp.empty(expected_shape, dtype=self.desired_dtype(xp=xp)))
def test_one(self, xp):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = xp.asarray(data, dtype=xp.int64)
r = rankdata(a)
xp_assert_equal(r, xp.asarray([1.0], dtype=self.desired_dtype(xp=xp)))
def test_basic(self, xp):
"""Basic tests of stats.rankdata."""
desired_dtype = self.desired_dtype(xp=xp)
data = [100, 10, 50]
expected = xp.asarray([3.0, 1.0, 2.0], dtype=desired_dtype)
a = xp.asarray(data, dtype=xp.int64)
r = rankdata(a)
xp_assert_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = xp.asarray([4.0, 1.5, 3.0, 1.5, 5.0], dtype=desired_dtype)
a = xp.asarray(data, dtype=xp.int64)
r = rankdata(a)
xp_assert_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = xp.asarray([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=desired_dtype)
a = xp.asarray(data, dtype=xp.int64)
r = rankdata(a)
xp_assert_equal(r, expected)
# # The docstring states explicitly that the argument is flattened.
a2d = xp.reshape(a, (2, 3))
r = rankdata(a2d)
xp_assert_equal(r, expected)
@skip_xp_invalid_arg
def test_rankdata_object_string(self):
def min_rank(a):
return [1 + sum(i < j for i in a) for j in a]
def max_rank(a):
return [sum(i <= j for i in a) for j in a]
def ordinal_rank(a):
return min_rank([(x, i) for i, x in enumerate(a)])
def average_rank(a):
return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]
def dense_rank(a):
b = np.unique(a)
return [1 + sum(i < j for i in b) for j in a]
rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
average=average_rank, dense=dense_rank)
def check_ranks(a):
for method in 'min', 'max', 'dense', 'ordinal', 'average':
out = rankdata(a, method=method)
assert_array_equal(out, rankf[method](a))
val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
check_ranks(np.random.choice(val, 200))
check_ranks(np.random.choice(val, 200).astype('object'))
val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
check_ranks(np.random.choice(val, 200).astype('object'))
@pytest.mark.skip_xp_backends("torch", reason="`take_along_axis` fails with uint64")
def test_large_uint(self, xp):
data = xp.asarray([2**60, 2**60+1], dtype=xp.uint64)
r = rankdata(data)
xp_assert_equal(r, xp.asarray([1.0, 2.0], dtype=self.desired_dtype(xp=xp)))
def test_large_int(self, xp):
data = xp.asarray([2**60, 2**60+1], dtype=xp.int64)
r = rankdata(data)
xp_assert_equal(r, xp.asarray([1.0, 2.0], dtype=self.desired_dtype(xp=xp)))
data = xp.asarray([2**60, -2**60+1], dtype=xp.int64)
r = rankdata(data)
xp_assert_equal(r, xp.asarray([2.0, 1.0], dtype=self.desired_dtype(xp=xp)))
@pytest.mark.parametrize('n', [10000, 100000, 1000000])
def test_big_tie(self, n, xp):
data = xp.ones(n)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
ref = xp.asarray(expected_rank * data, dtype=self.desired_dtype(xp=xp))
xp_assert_equal(r, ref)
def test_axis(self, xp):
data = xp.asarray([[0, 2, 1], [4, 2, 2]])
expected0 = xp.asarray([[1., 1.5, 1.], [2., 1.5, 2.]])
r0 = rankdata(data, axis=0)
xp_assert_equal(r0, expected0)
expected1 = xp.asarray([[1., 3., 2.], [3., 1.5, 1.5]])
r1 = rankdata(data, axis=1)
xp_assert_equal(r1, expected1)
methods= ["average", "min", "max", "dense", "ordinal"]
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("method", methods)
def test_size_0_axis(self, axis, method, xp):
shape = (3, 0)
desired_dtype = self.desired_dtype(method, xp=xp)
data = xp.zeros(shape)
r = rankdata(data, method=method, axis=axis)
assert_equal(r.shape, shape)
assert_equal(r.dtype, desired_dtype)
xp_assert_equal(r, xp.empty(shape, dtype=desired_dtype))
@pytest.mark.parametrize('axis', range(3))
@pytest.mark.parametrize('method', methods)
def test_nan_policy_omit_3d(self, axis, method):
shape = (20, 21, 22)
rng = np.random.RandomState(23983242)
a = rng.random(size=shape)
i = rng.random(size=shape) < 0.4
j = rng.random(size=shape) < 0.1
k = rng.random(size=shape) < 0.1
a[i] = np.nan
a[j] = -np.inf
a[k] - np.inf
def rank_1d_omit(a, method):
out = np.zeros_like(a)
i = np.isnan(a)
a_compressed = a[~i]
res = rankdata(a_compressed, method)
out[~i] = res
out[i] = np.nan
return out
def rank_omit(a, method, axis):
return np.apply_along_axis(lambda a: rank_1d_omit(a, method),
axis, a)
res = rankdata(a, method, axis=axis, nan_policy='omit')
res0 = rank_omit(a, method, axis=axis)
assert_array_equal(res, res0)
def test_nan_policy_2d_axis_none(self):
# 2 2d-array test with axis=None
data = [[0, np.nan, 3],
[4, 2, np.nan],
[1, 2, 2]]
assert_array_equal(rankdata(data, axis=None, nan_policy='omit'),
[1., np.nan, 6., 7., 4., np.nan, 2., 4., 4.])
assert_array_equal(rankdata(data, axis=None, nan_policy='propagate'),
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan])
def test_nan_policy_raise(self):
# 1 1d-array test
data = [0, 2, 3, -2, np.nan, np.nan]
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, nan_policy='raise')
# 2 2d-array test
data = [[0, np.nan, 3],
[4, 2, np.nan],
[np.nan, 2, 2]]
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, axis=0, nan_policy="raise")
with pytest.raises(ValueError, match="The input contains nan"):
rankdata(data, axis=1, nan_policy="raise")
def test_nan_policy_propagate(self):
# 1 1d-array test
data = [0, 2, 3, -2, np.nan, np.nan]
assert_array_equal(rankdata(data, nan_policy='propagate'),
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])
# 2 2d-array test
data = [[0, np.nan, 3],
[4, 2, np.nan],
[1, 2, 2]]
assert_array_equal(rankdata(data, axis=0, nan_policy='propagate'),
[[1, np.nan, np.nan],
[3, np.nan, np.nan],
[2, np.nan, np.nan]])
assert_array_equal(rankdata(data, axis=1, nan_policy='propagate'),
[[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[1, 2.5, 2.5]])
_rankdata_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
@pytest.mark.parametrize('case', _rankdata_cases)
def test_cases(self, case, xp):
values, method, expected = case
r = rankdata(xp.asarray(values), method=method)
ref = xp.asarray(expected, dtype=self.desired_dtype(method, xp=xp))
xp_assert_equal(r, ref)
| TestRankData |
python | PrefectHQ__prefect | tests/test_futures.py | {
"start": 12168,
"end": 17673
} | class ____:
async def test_wait_with_timeout(self, task_run):
@task
async def my_task():
return 42
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
asyncio.create_task(
run_task_async(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
)
future = PrefectDistributedFuture(task_run_id=task_run.id)
future.wait(timeout=0.25)
assert future.state.is_pending()
async def test_wait_without_timeout(self, events_pipeline):
@task
def my_task():
return 42
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
state = run_task_sync(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
future.wait()
assert future.state.is_completed()
async def test_result_with_final_state(self, events_pipeline):
@task(persist_result=True)
def my_task():
return 42
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
state = run_task_sync(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
assert await state.result() == 42
# When this test is run as a suite and the task uses default result
# storage, this line fails because the result storage block no longer
# exists.
assert future.result() == 42
async def test_final_state_without_result(self, events_pipeline):
@task(persist_result=False)
def my_task():
return 42
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
state = run_task_sync(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
with pytest.raises(MissingResult):
future.result()
async def test_result_with_final_state_and_raise_on_failure(self, events_pipeline):
@task(persist_result=True)
def my_task():
raise ValueError("oops")
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
state = run_task_sync(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_failed()
await events_pipeline.process_events()
with pytest.raises(ValueError, match="oops"):
future.result(raise_on_failure=True)
async def test_final_state_missing_result(self, events_pipeline):
@task(persist_result=False)
def my_task():
return 42
task_run = await my_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
state = run_task_sync(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
with pytest.raises(MissingResult):
future.result()
async def test_result_async_without_explicit_wait(self, events_pipeline):
"""Test that result_async() works without calling wait_async() first.
This tests the fix for issue #18278 where result_async() would timeout
when called on a task submitted via delay(), even though the task
completed successfully.
"""
@task(persist_result=True)
def delayed_task():
return "delayed_result"
task_run = await delayed_task.create_run()
future = PrefectDistributedFuture(task_run_id=task_run.id)
# Run the task (simulating what happens when using delay())
state = run_task_sync(
task=delayed_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={},
return_type="state",
)
assert state.is_completed()
# Process events so TaskRunWaiter knows the task is complete
await events_pipeline.process_events()
# Create a new future instance (simulating getting the future from delay())
fresh_future = PrefectDistributedFuture(task_run_id=task_run.id)
# result_async should work without calling wait_async explicitly
result = await fresh_future.result_async(timeout=5)
assert result == "delayed_result"
# Verify that _final_state was properly set
assert fresh_future._final_state is not None
assert fresh_future._final_state.is_completed()
| TestPrefectDistributedFuture |
python | python-poetry__poetry | src/poetry/publishing/uploader.py | {
"start": 835,
"end": 12361
} | class ____:
def __init__(self, poetry: Poetry, io: IO, dist_dir: Path | None = None) -> None:
self._poetry = poetry
self._package = poetry.package
self._io = io
self._dist_dir = dist_dir or self.default_dist_dir
self._username: str | None = None
self._password: str | None = None
@property
def user_agent(self) -> str:
agent: str = user_agent("poetry", __version__)
return agent
@property
def default_dist_dir(self) -> Path:
return self._poetry.file.path.parent / "dist"
@property
def dist_dir(self) -> Path:
if not self._dist_dir.is_absolute():
return self._poetry.file.path.parent / self._dist_dir
return self._dist_dir
@property
def files(self) -> list[Path]:
dist = self.dist_dir
version = self._package.version.to_string()
escaped_name = distribution_name(self._package.name)
wheels = list(dist.glob(f"{escaped_name}-{version}-*.whl"))
tars = list(dist.glob(f"{escaped_name}-{version}.tar.gz"))
return sorted(wheels + tars)
def auth(self, username: str | None, password: str | None) -> None:
self._username = username
self._password = password
def make_session(self) -> requests.Session:
session = requests.Session()
auth = self.get_auth()
if auth is not None:
session.auth = auth
session.headers["User-Agent"] = self.user_agent
return session
def get_auth(self) -> tuple[str, str] | None:
if self._username is None or self._password is None:
return None
return (self._username, self._password)
def upload(
self,
url: str,
cert: Path | bool = True,
client_cert: Path | None = None,
dry_run: bool = False,
skip_existing: bool = False,
) -> None:
session = self.make_session()
session.verify = str(cert) if isinstance(cert, Path) else cert
if client_cert:
session.cert = str(client_cert)
with session:
self._upload(session, url, dry_run, skip_existing)
@classmethod
def post_data(cls, file: Path) -> dict[str, Any]:
file_type = cls._get_type(file)
hash_manager = HashManager()
hash_manager.hash(file)
file_hashes = hash_manager.hexdigest()
md5_digest = file_hashes.md5
sha2_digest = file_hashes.sha256
blake2_256_digest = file_hashes.blake2_256
py_version: str | None = None
if file_type == "bdist_wheel":
wheel_info = wheel_file_re.match(file.name)
if wheel_info is not None:
py_version = wheel_info.group("pyver")
else:
py_version = "source"
data: dict[str, Any] = {
# Upload API (https://docs.pypi.org/api/upload/)
# ":action", "protocol_version" and "content are added later
"md5_digest": md5_digest,
"sha256_digest": sha2_digest,
"blake2_256_digest": blake2_256_digest,
"filetype": file_type,
"pyversion": py_version,
}
for key, value in cls._get_metadata(file).items():
# strip trailing 's' to match API field names
# see https://docs.pypi.org/api/upload/
if key in {"platforms", "supported_platforms", "license_files"}:
key = key[:-1]
# revert some special cases from packaging.metadata.parse_email()
# "keywords" is not "multiple use" but a comma-separated string
if key == "keywords":
assert isinstance(value, list)
value = ", ".join(value)
# "project_urls" is not a dict
if key == "project_urls":
assert isinstance(value, dict)
value = [f"{k}, {v}" for k, v in value.items()]
data[key] = value
return data
def _upload(
self,
session: requests.Session,
url: str,
dry_run: bool = False,
skip_existing: bool = False,
) -> None:
for file in self.files:
self._upload_file(session, url, file, dry_run, skip_existing)
def _upload_file(
self,
session: requests.Session,
url: str,
file: Path,
dry_run: bool = False,
skip_existing: bool = False,
) -> None:
from cleo.ui.progress_bar import ProgressBar
if not file.is_file():
raise UploadError(f"Archive ({file}) does not exist")
data = self.post_data(file)
data.update({":action": "file_upload", "protocol_version": "1"})
data_to_send: list[tuple[str, Any]] = self._prepare_data(data)
with file.open("rb") as fp:
data_to_send.append(
("content", (file.name, fp, "application/octet-stream"))
)
encoder = MultipartEncoder(data_to_send)
bar = ProgressBar(self._io, max=encoder.len)
bar.set_format(f" - Uploading <c1>{file.name}</c1> <b>%percent%%</b>")
monitor = MultipartEncoderMonitor(
encoder, lambda monitor: bar.set_progress(monitor.bytes_read)
)
bar.start()
resp = None
try:
if not dry_run:
resp = session.post(
url,
data=monitor,
allow_redirects=False,
headers={"Content-Type": monitor.content_type},
timeout=REQUESTS_TIMEOUT,
)
if resp is None or 200 <= resp.status_code < 300:
bar.set_format(
f" - Uploading <c1>{file.name}</c1> <fg=green>%percent%%</>"
)
bar.finish()
elif 300 <= resp.status_code < 400:
if self._io.output.is_decorated():
self._io.overwrite(
f" - Uploading <c1>{file.name}</c1> <error>FAILED</>"
)
raise UploadError(
"Redirects are not supported. "
"Is the URL missing a trailing slash?"
)
elif resp.status_code == 400 and "was ever registered" in resp.text:
self._register(session, url)
resp.raise_for_status()
elif skip_existing and self._is_file_exists_error(resp):
bar.set_format(
f" - Uploading <c1>{file.name}</c1> <warning>File exists."
" Skipping</>"
)
bar.display()
else:
resp.raise_for_status()
except requests.RequestException as e:
if self._io.output.is_decorated():
self._io.overwrite(
f" - Uploading <c1>{file.name}</c1> <error>FAILED</>"
)
if e.response is not None:
message = (
f"HTTP Error {e.response.status_code}: "
f"{e.response.reason} | {e.response.content!r}"
)
raise UploadError(message) from e
raise UploadError("Error connecting to repository") from e
finally:
self._io.write_line("")
def _register(self, session: requests.Session, url: str) -> requests.Response:
"""
Register a package to a repository.
"""
dist = self.dist_dir
escaped_name = distribution_name(self._package.name)
file = dist / f"{escaped_name}-{self._package.version.to_string()}.tar.gz"
if not file.exists():
raise RuntimeError(f'"{file.name}" does not exist.')
data = self.post_data(file)
data.update({":action": "submit", "protocol_version": "1"})
data_to_send = self._prepare_data(data)
encoder = MultipartEncoder(data_to_send)
resp = session.post(
url,
data=encoder,
allow_redirects=False,
headers={"Content-Type": encoder.content_type},
timeout=REQUESTS_TIMEOUT,
)
resp.raise_for_status()
return resp
def _prepare_data(self, data: dict[str, Any]) -> list[tuple[str, str]]:
data_to_send = []
for key, value in data.items():
if not isinstance(value, (list, tuple)):
data_to_send.append((key, value))
else:
for item in value:
data_to_send.append((key, item))
return data_to_send
@staticmethod
def _get_type(file: Path) -> Literal["bdist_wheel", "sdist"]:
exts = file.suffixes
if exts and exts[-1] == ".whl":
return "bdist_wheel"
elif len(exts) >= 2 and "".join(exts[-2:]) == ".tar.gz":
return "sdist"
raise ValueError("Unknown distribution format " + "".join(exts))
@staticmethod
def _get_metadata(file: Path) -> RawMetadata:
if file.suffix == ".whl":
with zipfile.ZipFile(file) as z:
for name in z.namelist():
parts = Path(name).parts
if (
len(parts) == 2
and parts[1] == "METADATA"
and parts[0].endswith(".dist-info")
):
with z.open(name) as mf:
return parse_email(mf.read().decode("utf-8"))[0]
raise FileNotFoundError("METADATA not found in wheel")
elif file.suffixes[-2:] == [".tar", ".gz"]:
with tarfile.open(file, "r:gz") as tar:
for member in tar.getmembers():
parts = Path(member.name).parts
if (
len(parts) == 2
and parts[1] == "PKG-INFO"
and (pf := tar.extractfile(member))
):
return parse_email(pf.read().decode("utf-8"))[0]
raise FileNotFoundError("PKG-INFO not found in sdist")
raise ValueError(f"Unsupported file type: {file}")
def _is_file_exists_error(self, response: requests.Response) -> bool:
# based on https://github.com/pypa/twine/blob/a6dd69c79f7b5abfb79022092a5d3776a499e31b/twine/commands/upload.py#L32
status = response.status_code
reason = response.reason.lower()
text = response.text.lower()
reason_and_text = reason + text
return (
# pypiserver (https://pypi.org/project/pypiserver)
status == 409
# PyPI / TestPyPI / GCP Artifact Registry
or (status == 400 and "already exist" in reason_and_text)
# Nexus Repository OSS (https://www.sonatype.com/nexus-repository-oss)
or (status == 400 and "updating asset" in reason_and_text)
or (status == 400 and "cannot be updated" in reason_and_text)
# Artifactory (https://jfrog.com/artifactory/)
or (status == 403 and "overwrite artifact" in reason_and_text)
# Gitlab Enterprise Edition (https://about.gitlab.com)
or (status == 400 and "already been taken" in reason_and_text)
)
| Uploader |
python | PyCQA__pylint | tests/functional/m/missing/missing_function_docstring.py | {
"start": 306,
"end": 413
} | class ____:
def __init__(self, my_param: int) -> None: # [missing-function-docstring]
pass
| MyClass |
python | huggingface__transformers | src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py | {
"start": 6979,
"end": 8638
} | class ____(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None:
super().__init__()
# The very first layer is a regular 3x3 convolution with stride 2 that expands to 32 channels.
# All other expansion layers use the expansion factor to compute the number of output channels.
self.first_conv = MobileNetV2ConvLayer(
config,
in_channels=in_channels,
out_channels=expanded_channels,
kernel_size=3,
stride=2,
)
if config.first_layer_is_expansion:
self.expand_1x1 = None
else:
self.expand_1x1 = MobileNetV2ConvLayer(
config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1
)
self.conv_3x3 = MobileNetV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=expanded_channels,
kernel_size=3,
stride=1,
groups=expanded_channels,
)
self.reduce_1x1 = MobileNetV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=out_channels,
kernel_size=1,
use_activation=False,
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.first_conv(features)
if self.expand_1x1 is not None:
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return features
@auto_docstring
| MobileNetV2Stem |
python | celery__celery | t/unit/concurrency/test_gevent.py | {
"start": 3057,
"end": 4170
} | class ____:
def test_apply_timeout(self):
self.patching.modules(*gevent_modules)
class Timeout(Exception):
value = None
def __init__(self, value):
self.__class__.value = value
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
timeout_callback = Mock(name='timeout_callback')
apply_target = Mock(name='apply_target')
getpid = Mock(name='getpid')
apply_timeout(
Mock(), timeout=10, callback=Mock(name='callback'),
timeout_callback=timeout_callback, getpid=getpid,
apply_target=apply_target, Timeout=Timeout,
)
assert Timeout.value == 10
apply_target.assert_called()
apply_target.side_effect = Timeout(10)
apply_timeout(
Mock(), timeout=10, callback=Mock(),
timeout_callback=timeout_callback, getpid=getpid,
apply_target=apply_target, Timeout=Timeout,
)
timeout_callback.assert_called_with(False, 10)
| test_apply_timeout |
python | jazzband__django-formtools | tests/wizard/wizardtests/models.py | {
"start": 201,
"end": 430
} | class ____(models.Model):
poet = models.ForeignKey(Poet, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
class Meta:
app_label = 'formtools'
def __str__(self):
return self.name
| Poem |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_project_monitor_details.py | {
"start": 293,
"end": 465
} | class ____(BaseMonitorDetailsTest, BaseProjectMonitorTest):
endpoint = "sentry-api-0-project-monitor-details"
__test__ = True
@freeze_time()
| ProjectMonitorDetailsTest |
python | joke2k__faker | faker/providers/phone_number/nl_NL/__init__.py | {
"start": 49,
"end": 512
} | class ____(PhoneNumberProvider):
formats = (
"0### ######",
"0## #######",
"+31### ######",
"+31## #######",
"+31(0)### ######",
"+31(0)## #######",
"(0###) ######",
"(0##) #######",
"0###-######",
"0##-#######",
"+31###-######",
"+31##-#######",
"+31(0)###-######",
"+31(0)##-#######",
"(0###)-######",
"(0##)-#######",
)
| Provider |
python | django__django | tests/null_fk_ordering/tests.py | {
"start": 106,
"end": 2013
} | class ____(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name="Tom Jones")
author_2 = Author.objects.create(name="Bob Smith")
Article.objects.create(title="No author on this article")
Article.objects.create(
author=author_1, title="This article written by Tom Jones"
)
Article.objects.create(
author=author_2, title="This article written by Bob Smith"
)
# We can't compare results directly (since different databases sort
# NULLs to different ends of the ordering), but we can check that all
# results are returned.
self.assertEqual(len(list(Article.objects.all())), 3)
s = SystemInfo.objects.create(system_name="System Info")
f = Forum.objects.create(system_info=s, forum_name="First forum")
p = Post.objects.create(forum=f, title="First Post")
Comment.objects.create(post=p, comment_text="My first comment")
Comment.objects.create(comment_text="My second comment")
s2 = SystemInfo.objects.create(system_name="More System Info")
f2 = Forum.objects.create(system_info=s2, forum_name="Second forum")
p2 = Post.objects.create(forum=f2, title="Second Post")
Comment.objects.create(comment_text="Another first comment")
Comment.objects.create(post=p2, comment_text="Another second comment")
# We have to test this carefully. Some databases sort NULL values
# before everything else, some sort them afterward. So we extract the
# ordered list and check the length. Before the fix, this list was too
# short (some values were omitted).
self.assertEqual(len(list(Comment.objects.all())), 4)
| NullFkOrderingTests |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 6909,
"end": 7093
} | class ____:
#? 9 text {'new_name': 'f', 'until_line': 4}
a = 3
c = a + 2
# ++++++++++++++++++++++++++++++++++++++++++++++++++
def f():
a = 3
c = a + 2
return c
| X1 |
python | crytic__slither | slither/slithir/operations/member.py | {
"start": 749,
"end": 3068
} | class ____(OperationWithLValue):
def __init__(
self,
variable_left: SourceMapping,
variable_right: Constant,
result: Union[ReferenceVariable, ReferenceVariableSSA],
) -> None:
# Function can happen for something like
# library FunctionExtensions {
# function h(function() internal _t, uint8) internal { }
# }
# contract FunctionMembers {
# using FunctionExtensions for function();
#
# function f() public {
# f.h(1);
# }
# }
# Can be an ElementaryType because of bytes.concat, string.concat
assert is_valid_rvalue(variable_left) or isinstance(
variable_left,
(
Contract,
Enum,
Function,
Event,
CustomError,
SolidityImportPlaceHolder,
ElementaryType,
),
)
assert isinstance(variable_right, Constant)
assert isinstance(result, ReferenceVariable)
super().__init__()
self._variable_left: Union[
RVALUE,
Contract,
Enum,
Function,
Event,
CustomError,
SolidityImportPlaceHolder,
ElementaryType,
] = variable_left
self._variable_right = variable_right
self._lvalue = result
self._gas = None
self._value = None
@property
def read(self) -> List[SourceMapping]:
return [self.variable_left, self.variable_right]
@property
def variable_left(
self,
) -> Union[
RVALUE, Contract, Enum, Function, CustomError, SolidityImportPlaceHolder, ElementaryType
]:
return self._variable_left
@property
def variable_right(self) -> Constant:
return self._variable_right
@property
def call_value(self):
return self._value
@call_value.setter
def call_value(self, v):
self._value = v
@property
def call_gas(self):
return self._gas
@call_gas.setter
def call_gas(self, gas):
self._gas = gas
def __str__(self):
return f"{self.lvalue}({self.lvalue.type}) -> {self.variable_left}.{self.variable_right}"
| Member |
python | run-llama__llama_index | llama-index-integrations/evaluation/llama-index-evaluation-tonic-validate/llama_index/evaluation/tonic_validate/answer_similarity.py | {
"start": 356,
"end": 2057
} | class ____(BaseEvaluator):
"""
Tonic Validate's answer similarity metric.
The output score is a float between 0.0 and 5.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerSimilarityMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference_response: Optional[str] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=reference_response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
| AnswerSimilarityEvaluator |
python | great-expectations__great_expectations | great_expectations/core/util.py | {
"start": 9303,
"end": 10677
} | class ____:
OBJECT_URL_TEMPLATE: str = "s3a://{bucket}/{path}"
"""
>>> s = S3Url("s3://bucket/hello/world")
>>> s.bucket
'bucket'
>>> s.key
'hello/world'
>>> s.url
's3://bucket/hello/world'
>>> s = S3Url("s3://bucket/hello/world?qwe1=3#ddd")
>>> s.bucket
'bucket'
>>> s.key
'hello/world?qwe1=3#ddd'
>>> s.url
's3://bucket/hello/world?qwe1=3#ddd'
>>> s = S3Url("s3://bucket/hello/world#foo?bar=2")
>>> s.key
'hello/world#foo?bar=2'
>>> s.url
's3://bucket/hello/world#foo?bar=2'
"""
def __init__(self, url) -> None:
self._parsed = urlparse(url, allow_fragments=False)
@property
def bucket(self):
return self._parsed.netloc
@property
def key(self):
if self._parsed.query:
return f"{self._parsed.path.lstrip('/')}?{self._parsed.query}"
else:
return self._parsed.path.lstrip("/")
@property
def suffix(self) -> Optional[str]:
"""
Attempts to get a file suffix from the S3 key.
If can't find one returns `None`.
"""
splits = self._parsed.path.rsplit(".", 1)
_suffix = splits[-1]
if len(_suffix) > 0 and len(splits) > 1:
return str(_suffix)
return None
@property
def url(self):
return self._parsed.geturl()
| S3Url |
python | huggingface__transformers | src/transformers/models/encodec/modeling_encodec.py | {
"start": 1655,
"end": 2586
} | class ____(ModelOutput):
r"""
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
last_frame_pad_length (`int`, *optional*):
The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be
outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the
encoded frames.
"""
audio_codes: Optional[torch.LongTensor] = None
audio_scales: Optional[torch.FloatTensor] = None
last_frame_pad_length: Optional[int] = None
@dataclass
@auto_docstring
| EncodecEncoderOutput |
python | django__django | tests/m2m_through_regress/models.py | {
"start": 2209,
"end": 2296
} | class ____(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
| Competitor |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/sagemaker.py | {
"start": 12655,
"end": 13795
} | class ____(SageMakerBaseSensor):
"""
Poll the processing job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerProcessingSensor`
:param job_name: Name of the processing job to watch.
"""
template_fields: Sequence[str] = aws_template_fields(
"job_name",
)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name: str, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
def non_terminal_states(self) -> set[str]:
return SageMakerHook.processing_job_non_terminal_states
def failed_states(self) -> set[str]:
return SageMakerHook.processing_job_failed_states
def get_sagemaker_response(self) -> dict:
self.log.info("Poking Sagemaker ProcessingJob %s", self.job_name)
return self.hook.describe_processing_job(self.job_name)
def state_from_response(self, response: dict) -> str:
return response["ProcessingJobStatus"]
| SageMakerProcessingSensor |
python | protocolbuffers__protobuf | python/google/protobuf/internal/reflection_test.py | {
"start": 2877,
"end": 35740
} | class ____(unittest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self, message_module):
# Constructor with only scalar types should succeed.
proto = message_module.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string',
optional_float=None)
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
if message_module is unittest_pb2:
self.assertFalse(proto.HasField("optional_float"))
def testRepeatedScalarConstructor(self, message_module):
# Constructor with only repeated scalar types should succeed.
proto = message_module.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"],
repeated_float=None)
self.assertEqual([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEqual([1.23, 54.321], list(proto.repeated_double))
self.assertEqual([True, False, False], list(proto.repeated_bool))
self.assertEqual(["optional_string"], list(proto.repeated_string))
self.assertEqual([], list(proto.repeated_float))
def testMixedConstructor(self, message_module):
# Constructor with only mixed types should succeed.
proto = message_module.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
message_module.TestAllTypes.NestedMessage(
bb=message_module.TestAllTypes.FOO),
message_module.TestAllTypes.NestedMessage(
bb=message_module.TestAllTypes.BAR)],
repeated_foreign_message=[
message_module.ForeignMessage(c=-43),
message_module.ForeignMessage(c=45324),
message_module.ForeignMessage(c=12)],
optional_nested_message=None)
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEqual([1.23, 54.321], list(proto.repeated_double))
self.assertEqual([True, False, False], list(proto.repeated_bool))
self.assertEqual(
[message_module.TestAllTypes.NestedMessage(
bb=message_module.TestAllTypes.FOO),
message_module.TestAllTypes.NestedMessage(
bb=message_module.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEqual(
[message_module.ForeignMessage(c=-43),
message_module.ForeignMessage(c=45324),
message_module.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertFalse(proto.HasField("optional_nested_message"))
def testConstructorTypeError(self, message_module):
self.assertRaises(
TypeError, message_module.TestAllTypes, optional_int32='foo')
self.assertRaises(
TypeError, message_module.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, message_module.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_int32=['foo'])
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, message_module.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self, message_module):
message = message_module.TestAllTypes(optional_int32=12)
self.assertEqual(2, message.ByteSize())
message = message_module.TestAllTypes(
optional_nested_message=message_module.TestAllTypes.NestedMessage())
self.assertEqual(3, message.ByteSize())
message = message_module.TestAllTypes(repeated_int32=[12])
# TODO: Add this test back for proto3
if message_module is unittest_pb2:
self.assertEqual(3, message.ByteSize())
message = message_module.TestAllTypes(
repeated_nested_message=[message_module.TestAllTypes.NestedMessage()])
self.assertEqual(3, message.ByteSize())
def testReferencesToNestedMessage(self, message_module):
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testOneOf(self, message_module):
proto = message_module.TestAllTypes()
proto.oneof_uint32 = 10
proto.oneof_nested_message.bb = 11
self.assertEqual(11, proto.oneof_nested_message.bb)
self.assertFalse(proto.HasField('oneof_uint32'))
nested = proto.oneof_nested_message
proto.oneof_string = 'abc'
self.assertEqual('abc', proto.oneof_string)
self.assertEqual(11, nested.bb)
self.assertFalse(proto.HasField('oneof_nested_message'))
def testGetDefaultMessageAfterDisconnectingDefaultMessage(
self, message_module):
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message')
del proto
del nested
# Force a garbage collect so that the underlying CMessages are freed along
# with the Messages they point to. This is to make sure we're not deleting
# default message instances.
gc.collect()
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
def testDisconnectingNestedMessageAfterSettingField(self, message_module):
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
self.assertTrue(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertEqual(5, nested.bb)
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertIsNot(nested, proto.optional_nested_message)
nested.bb = 23
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testDisconnectingNestedMessageBeforeGettingField(self, message_module):
proto = message_module.TestAllTypes()
self.assertFalse(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message')
self.assertFalse(proto.HasField('optional_nested_message'))
def testDisconnectingNestedMessageAfterMerge(self, message_module):
# This test exercises the code path that does not use ReleaseMessage().
# The underlying fear is that if we use ReleaseMessage() incorrectly,
# we will have memory leaks. It's hard to check that that doesn't happen,
# but at least we can exercise that code path to make sure it works.
proto1 = message_module.TestAllTypes()
proto2 = message_module.TestAllTypes()
proto2.optional_nested_message.bb = 5
proto1.MergeFrom(proto2)
self.assertTrue(proto1.HasField('optional_nested_message'))
proto1.ClearField('optional_nested_message')
self.assertFalse(proto1.HasField('optional_nested_message'))
def testDisconnectingLazyNestedMessage(self, message_module):
proto = message_module.TestAllTypes()
proto.optional_lazy_message.bb = 5
proto.ClearField('optional_lazy_message')
del proto
gc.collect()
def testSingularListFields(self, message_module):
proto = message_module.TestAllTypes()
proto.optional_fixed32 = 1
proto.optional_int32 = 5
proto.optional_string = 'foo'
# Access sub-message but don't set it yet.
nested_message = proto.optional_nested_message
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
proto.ListFields())
proto.optional_nested_message.bb = 123
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'),
(proto.DESCRIPTOR.fields_by_name['optional_nested_message' ],
nested_message) ],
proto.ListFields())
def testRepeatedListFields(self, message_module):
proto = message_module.TestAllTypes()
proto.repeated_fixed32.append(1)
proto.repeated_int32.append(5)
proto.repeated_int32.append(11)
proto.repeated_string.extend(['foo', 'bar'])
proto.repeated_string.extend([])
proto.repeated_string.append('baz')
proto.repeated_string.extend(str(x) for x in range(2))
proto.optional_int32 = 21
proto.repeated_bool # Access but don't set anything; should not be listed.
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21),
(proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]),
(proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
(proto.DESCRIPTOR.fields_by_name['repeated_string' ],
['foo', 'bar', 'baz', '0', '1']) ],
proto.ListFields())
def testClearFieldWithUnknownFieldName(self, message_module):
proto = message_module.TestAllTypes()
self.assertRaises(ValueError, proto.ClearField, 'nonexistent_field')
self.assertRaises(ValueError, proto.ClearField, b'nonexistent_field')
def testDisallowedAssignments(self, message_module):
# It's illegal to assign values directly to repeated fields
# or to nonrepeated composite fields. Ensure that this fails.
proto = message_module.TestAllTypes()
# Repeated fields.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
# Lists shouldn't work, either.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
# Composite fields.
self.assertRaises(AttributeError, setattr, proto,
'optional_nested_message', 23)
# Assignment to a repeated nested message field without specifying
# the index in the array of nested messages.
self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
'bb', 34)
# Assignment to an attribute of a repeated field.
self.assertRaises(AttributeError, setattr, proto.repeated_float,
'some_attribute', 34)
# proto.nonexistent_field = 23 should fail as well.
self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
def testSingleScalarTypeSafety(self, message_module):
proto = message_module.TestAllTypes()
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1)
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_string', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bool', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_float', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_double', 'foo')
# TODO: Fix type checking difference for python and c extension
if (api_implementation.Type() == 'python' or
(sys.version_info.major, sys.version_info.minor) >= (3, 10)):
self.assertRaises(TypeError, setattr, proto, 'optional_bool', 1.1)
else:
proto.optional_bool = 1.1
def assertIntegerTypes(self, integer_fn, message_module):
"""Verifies setting of scalar integers.
Args:
integer_fn: A function to wrap the integers that will be assigned.
message_module: unittest_pb2 or unittest_proto3_arena_pb2
"""
def TestGetAndDeserialize(field_name, value, expected_type):
proto = message_module.TestAllTypes()
value = integer_fn(value)
setattr(proto, field_name, value)
self.assertIsInstance(getattr(proto, field_name), expected_type)
proto2 = message_module.TestAllTypes()
proto2.ParseFromString(proto.SerializeToString())
self.assertIsInstance(getattr(proto2, field_name), expected_type)
TestGetAndDeserialize('optional_int32', 1, int)
TestGetAndDeserialize('optional_int32', 1 << 30, int)
TestGetAndDeserialize('optional_uint32', 1 << 30, int)
integer_64 = int
if struct.calcsize('L') == 4:
# Python only has signed ints, so 32-bit python can't fit an uint32
# in an int.
TestGetAndDeserialize('optional_uint32', 1 << 31, integer_64)
else:
# 64-bit python can fit uint32 inside an int
TestGetAndDeserialize('optional_uint32', 1 << 31, int)
TestGetAndDeserialize('optional_int64', 1 << 30, integer_64)
TestGetAndDeserialize('optional_int64', 1 << 60, integer_64)
TestGetAndDeserialize('optional_uint64', 1 << 30, integer_64)
TestGetAndDeserialize('optional_uint64', 1 << 60, integer_64)
def testIntegerTypes(self, message_module):
self.assertIntegerTypes(lambda x: x, message_module)
def testNonStandardIntegerTypes(self, message_module):
self.assertIntegerTypes(test_util.NonStandardInteger, message_module)
def testIllegalValuesForIntegers(self, message_module):
pb = message_module.TestAllTypes()
# Strings are illegal, even when the represent an integer.
with self.assertRaises(TypeError):
pb.optional_uint64 = '2'
# The exact error should propagate with a poorly written custom integer.
with self.assertRaisesRegex(RuntimeError, 'my_error'):
pb.optional_uint64 = test_util.NonStandardInteger(5, 'my_error')
def assetIntegerBoundsChecking(self, integer_fn, message_module):
"""Verifies bounds checking for scalar integer fields.
Args:
integer_fn: A function to wrap the integers that will be assigned.
message_module: unittest_pb2 or unittest_proto3_arena_pb2
"""
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
pb = message_module.TestAllTypes()
expected_min = integer_fn(expected_min)
expected_max = integer_fn(expected_max)
setattr(pb, field_name, expected_min)
self.assertEqual(expected_min, getattr(pb, field_name))
setattr(pb, field_name, expected_max)
self.assertEqual(expected_max, getattr(pb, field_name))
self.assertRaises((ValueError, TypeError), setattr, pb, field_name,
expected_min - 1)
self.assertRaises((ValueError, TypeError), setattr, pb, field_name,
expected_max + 1)
TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)
# A bit of white-box testing since -1 is an int and not a long in C++ and
# so goes down a different path.
pb = message_module.TestAllTypes()
with self.assertRaises((ValueError, TypeError)):
pb.optional_uint64 = integer_fn(-(1 << 63))
pb = message_module.TestAllTypes()
pb.optional_nested_enum = integer_fn(1)
self.assertEqual(1, pb.optional_nested_enum)
def testSingleScalarBoundsChecking(self, message_module):
self.assetIntegerBoundsChecking(lambda x: x, message_module)
def testNonStandardSingleScalarBoundsChecking(self, message_module):
self.assetIntegerBoundsChecking(
test_util.NonStandardInteger, message_module)
def testRepeatedScalarTypeSafety(self, message_module):
proto = message_module.TestAllTypes()
self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
self.assertRaises(TypeError, proto.repeated_string, 10)
self.assertRaises(TypeError, proto.repeated_bytes, 10)
proto.repeated_int32.append(10)
proto.repeated_int32[0] = 23
self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, [])
self.assertRaises(TypeError, proto.repeated_int32.__setitem__,
'index', 23)
proto.repeated_string.append('2')
self.assertRaises(TypeError, proto.repeated_string.__setitem__, 0, 10)
# Repeated enums tests.
# proto.repeated_nested_enum.append(0)
def testSingleScalarGettersAndSetters(self, message_module):
proto = message_module.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
proto.optional_int32 = 1
self.assertEqual(1, proto.optional_int32)
proto.optional_uint64 = 0xffffffffffff
self.assertEqual(0xffffffffffff, proto.optional_uint64)
proto.optional_uint64 = 0xffffffffffffffff
self.assertEqual(0xffffffffffffffff, proto.optional_uint64)
# TODO: Test all other scalar field types.
def testEnums(self, message_module):
proto = message_module.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, message_module.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, message_module.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, message_module.TestAllTypes.BAZ)
def testEnum_Name(self, message_module):
self.assertEqual(
'FOREIGN_FOO',
message_module.ForeignEnum.Name(message_module.FOREIGN_FOO))
self.assertEqual(
'FOREIGN_BAR',
message_module.ForeignEnum.Name(message_module.FOREIGN_BAR))
self.assertEqual(
'FOREIGN_BAZ',
message_module.ForeignEnum.Name(message_module.FOREIGN_BAZ))
self.assertRaises(ValueError,
message_module.ForeignEnum.Name, 11312)
proto = message_module.TestAllTypes()
self.assertEqual('FOO',
proto.NestedEnum.Name(proto.FOO))
self.assertEqual('FOO',
message_module.TestAllTypes.NestedEnum.Name(proto.FOO))
self.assertEqual('BAR',
proto.NestedEnum.Name(proto.BAR))
self.assertEqual('BAR',
message_module.TestAllTypes.NestedEnum.Name(proto.BAR))
self.assertEqual('BAZ',
proto.NestedEnum.Name(proto.BAZ))
self.assertEqual('BAZ',
message_module.TestAllTypes.NestedEnum.Name(proto.BAZ))
self.assertRaises(ValueError,
proto.NestedEnum.Name, 11312)
self.assertRaises(ValueError,
message_module.TestAllTypes.NestedEnum.Name, 11312)
# Check some coercion cases.
self.assertRaises(TypeError, message_module.TestAllTypes.NestedEnum.Name,
11312.0)
self.assertRaises(TypeError, message_module.TestAllTypes.NestedEnum.Name,
None)
self.assertEqual('FOO', message_module.TestAllTypes.NestedEnum.Name(True))
def testEnum_Value(self, message_module):
self.assertEqual(message_module.FOREIGN_FOO,
message_module.ForeignEnum.Value('FOREIGN_FOO'))
self.assertEqual(message_module.FOREIGN_FOO,
message_module.ForeignEnum.FOREIGN_FOO)
self.assertEqual(message_module.FOREIGN_BAR,
message_module.ForeignEnum.Value('FOREIGN_BAR'))
self.assertEqual(message_module.FOREIGN_BAR,
message_module.ForeignEnum.FOREIGN_BAR)
self.assertEqual(message_module.FOREIGN_BAZ,
message_module.ForeignEnum.Value('FOREIGN_BAZ'))
self.assertEqual(message_module.FOREIGN_BAZ,
message_module.ForeignEnum.FOREIGN_BAZ)
self.assertRaises(ValueError,
message_module.ForeignEnum.Value, 'FO')
with self.assertRaises(AttributeError):
message_module.ForeignEnum.FO
proto = message_module.TestAllTypes()
self.assertEqual(proto.FOO,
proto.NestedEnum.Value('FOO'))
self.assertEqual(proto.FOO,
proto.NestedEnum.FOO)
self.assertEqual(proto.FOO,
message_module.TestAllTypes.NestedEnum.Value('FOO'))
self.assertEqual(proto.FOO,
message_module.TestAllTypes.NestedEnum.FOO)
self.assertEqual(proto.BAR,
proto.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAR,
proto.NestedEnum.BAR)
self.assertEqual(proto.BAR,
message_module.TestAllTypes.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAR,
message_module.TestAllTypes.NestedEnum.BAR)
self.assertEqual(proto.BAZ,
proto.NestedEnum.Value('BAZ'))
self.assertEqual(proto.BAZ,
proto.NestedEnum.BAZ)
self.assertEqual(proto.BAZ,
message_module.TestAllTypes.NestedEnum.Value('BAZ'))
self.assertEqual(proto.BAZ,
message_module.TestAllTypes.NestedEnum.BAZ)
self.assertRaises(ValueError,
proto.NestedEnum.Value, 'Foo')
with self.assertRaises(AttributeError):
proto.NestedEnum.Value.Foo
self.assertRaises(ValueError,
message_module.TestAllTypes.NestedEnum.Value, 'Foo')
with self.assertRaises(AttributeError):
message_module.TestAllTypes.NestedEnum.Value.Foo
def testEnum_KeysAndValues(self, message_module):
if message_module == unittest_pb2:
keys = [
'FOREIGN_FOO',
'FOREIGN_BAR',
'FOREIGN_BAZ',
'FOREIGN_BAX',
'FOREIGN_LARGE',
]
values = [4, 5, 6, 32, 123456]
items = [
('FOREIGN_FOO', 4),
('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6),
('FOREIGN_BAX', 32),
('FOREIGN_LARGE', 123456),
]
else:
keys = [
'FOREIGN_ZERO',
'FOREIGN_FOO',
'FOREIGN_BAR',
'FOREIGN_BAZ',
'FOREIGN_LARGE',
]
values = [0, 4, 5, 6, 123456]
items = [
('FOREIGN_ZERO', 0),
('FOREIGN_FOO', 4),
('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6),
('FOREIGN_LARGE', 123456),
]
self.assertEqual(keys,
list(message_module.ForeignEnum.keys()))
self.assertEqual(values,
list(message_module.ForeignEnum.values()))
self.assertEqual(items,
list(message_module.ForeignEnum.items()))
proto = message_module.TestAllTypes()
if message_module == unittest_pb2:
keys = ['FOO', 'BAR', 'BAZ', 'NEG']
values = [1, 2, 3, -1]
items = [('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)]
else:
keys = ['ZERO', 'FOO', 'BAR', 'BAZ', 'NEG']
values = [0, 1, 2, 3, -1]
items = [('ZERO', 0), ('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)]
self.assertEqual(keys, list(proto.NestedEnum.keys()))
self.assertEqual(values, list(proto.NestedEnum.values()))
self.assertEqual(items,
list(proto.NestedEnum.items()))
def testStaticParseFrom(self, message_module):
proto1 = message_module.TestAllTypes()
test_util.SetAllFields(proto1)
string1 = proto1.SerializeToString()
proto2 = message_module.TestAllTypes.FromString(string1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
def testMergeFromSingularField(self, message_module):
# Test merge with just a singular field.
proto1 = message_module.TestAllTypes()
proto1.optional_int32 = 1
proto2 = message_module.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self, message_module):
# Test merge with just a repeated field.
proto1 = message_module.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = message_module.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromRepeatedNestedMessage(self, message_module):
# Test merge with a repeated nested message.
proto1 = message_module.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = message_module.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
proto3 = message_module.TestAllTypes()
proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
self.assertEqual(999, proto3.repeated_nested_message[0].bb)
self.assertEqual(123, proto3.repeated_nested_message[1].bb)
self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self, message_module):
# With all fields set.
proto1 = message_module.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = message_module.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromBug(self, message_module):
message1 = message_module.TestAllTypes()
message2 = message_module.TestAllTypes()
# Cause optional_nested_message to be instantiated within message1, even
# though it is not considered to be "present".
message1.optional_nested_message
self.assertFalse(message1.HasField('optional_nested_message'))
# Merge into message2. This should not instantiate the field is message2.
message2.MergeFrom(message1)
self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self, message_module):
# Test copy with just a singular field.
proto1 = message_module.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = message_module.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self, message_module):
# Test copy with a repeated field.
proto1 = message_module.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = message_module.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self, message_module):
# With all fields set.
proto1 = message_module.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = message_module.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self, message_module):
proto1 = message_module.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testDeepCopy(self, message_module):
proto1 = message_module.TestAllTypes()
proto1.optional_int32 = 1
proto2 = copy.deepcopy(proto1)
self.assertEqual(1, proto2.optional_int32)
proto1.repeated_int32.append(2)
proto1.repeated_int32.append(3)
container = copy.deepcopy(proto1.repeated_int32)
self.assertEqual([2, 3], container)
container.remove(container[0])
self.assertEqual([3], container)
message1 = proto1.repeated_nested_message.add()
message1.bb = 1
messages = copy.deepcopy(proto1.repeated_nested_message)
self.assertEqual(proto1.repeated_nested_message, messages)
message1.bb = 2
self.assertNotEqual(proto1.repeated_nested_message, messages)
messages.remove(messages[0])
self.assertEqual(len(messages), 0)
def testEmptyDeepCopy(self, message_module):
proto1 = message_module.TestAllTypes()
nested2 = copy.deepcopy(proto1.optional_nested_message)
self.assertEqual(0, nested2.bb)
# TODO: Implement deepcopy for extension dict
def testDisconnectingBeforeClear(self, message_module):
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
proto.Clear()
self.assertIsNot(nested, proto.optional_nested_message)
nested.bb = 23
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
proto = message_module.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
foreign = proto.optional_foreign_message
foreign.c = 6
proto.Clear()
self.assertIsNot(nested, proto.optional_nested_message)
self.assertIsNot(foreign, proto.optional_foreign_message)
self.assertEqual(5, nested.bb)
self.assertEqual(6, foreign.c)
nested.bb = 15
foreign.c = 16
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertFalse(proto.HasField('optional_foreign_message'))
self.assertEqual(0, proto.optional_foreign_message.c)
def testStringUTF8Encoding(self, message_module):
proto = message_module.TestAllTypes()
# Assignment of a unicode object to a field of type 'bytes' is not allowed.
self.assertRaises(TypeError,
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
self.assertEqual(type(proto.optional_string), str)
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Try to assign a 'bytes' object which contains non-UTF-8.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', b'a\x80a')
# No exception: Assign already encoded UTF-8 bytes to a string field.
utf8_bytes = u'Тест'.encode('utf-8')
proto.optional_string = utf8_bytes
# No exception: Assign the a non-ascii unicode object.
proto.optional_string = u'Тест'
# No exception thrown (normal str assignment containing ASCII).
proto.optional_string = 'abc'
def testBytesInTextFormat(self, message_module):
proto = message_module.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n', str(proto))
def testEmptyNestedMessage(self, message_module):
proto = message_module.TestAllTypes()
proto.optional_nested_message.MergeFrom(
message_module.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = message_module.TestAllTypes()
proto.optional_nested_message.CopyFrom(
message_module.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = message_module.TestAllTypes()
bytes_read = proto.optional_nested_message.MergeFromString(b'')
self.assertEqual(0, bytes_read)
self.assertTrue(proto.HasField('optional_nested_message'))
proto = message_module.TestAllTypes()
proto.optional_nested_message.ParseFromString(b'')
self.assertTrue(proto.HasField('optional_nested_message'))
serialized = proto.SerializeToString()
proto2 = message_module.TestAllTypes()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertTrue(proto2.HasField('optional_nested_message'))
# Class to test proto2-only features (required, extensions, etc.)
@testing_refleaks.TestCase
| ReflectionTest |
python | huggingface__transformers | tests/models/barthez/test_tokenization_barthez.py | {
"start": 829,
"end": 2834
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "moussaKam/mbarthez"
tokenizer_class = BarthezTokenizer
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活的真谛是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁', 'ปี', '▁ir', 'd', '▁', 'ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [2078, 75, 10, 1938, 6, 3, 78, 402, 49997, 23, 387, 7648, 4, 124, 663, 75, 41564, 362, 5, 6, 3, 1739, 18324, 1739, 18324, 18324, 0, 901, 0, 1749, 451, 13564, 39363, 3354, 166, 72171, 22, 21077, 64, 12, 18324, 5, 3007, 172, 64, 124, 6, 3, 172, 64, 6, 3, 14833, 2271, 482, 329, 11028] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '<unk>', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '<unk>', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁', '<unk>', '▁ir', 'd', '▁', '<unk>', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test <unk> I was born in 92000, and this is falsé. <unk> Hi Hello Hi Hello Hello<s> hi<s> there The following string should be properly encoded: Hello. But ird and <unk> ird <unk> Hey how are you doing"
@classmethod
def setUpClass(cls):
super().setUpClass()
from_pretrained_id = "moussaKam/mbarthez"
tokenizer = BarthezTokenizer.from_pretrained(from_pretrained_id)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained(cls.tmpdirname)
| BarthezTokenizationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/records/post_votes_records_builder.py | {
"start": 196,
"end": 503
} | class ____(ZendeskSupportRecordBuilder):
@classmethod
def posts_votes_record(cls) -> "PostsVotesRecordBuilder":
record_template = cls.extract_record("votes", __file__, NestedPath(["votes", 0]))
return cls(record_template, FieldPath("id"), FieldPath("updated_at"))
| PostsVotesRecordBuilder |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/postgresql.py | {
"start": 120,
"end": 162
} | class ____(BaseModel):
port: int
| Service |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 27704,
"end": 34549
} | class ____(QueryTest, AssertsCompiledSQL):
@testing.fails(
"ORM refactor not allowing this yet, "
"we may just abandon this use case"
)
def test_from_alias_one(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(order_by=[text("ulist.id"), addresses.c.id])
)
sess = fixture_session()
q = sess.query(User)
# note this has multiple problems because we aren't giving Query
# the statement where it would be able to create an adapter
def go():
with testing.expect_deprecated(
r"Using the Query.instances\(\) method without a context",
"Retrieving row values using Column objects with only "
"matching names",
):
result = list(
q.options(
contains_alias("ulist"), contains_eager("addresses")
).instances(query.execute())
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_two_old_way(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
q = sess.query(User)
def go():
with testing.expect_deprecated(
"The AliasOption object is not necessary for entities to be "
"matched up to a query",
):
result = (
q.options(
contains_alias("ulist"), contains_eager(User.addresses)
)
.from_statement(query)
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session()
selectquery = (
users.outerjoin(addresses)
.select()
.where(users.c.id < 10)
.order_by(users.c.id, addresses.c.id)
)
q = sess.query(User)
def go():
with testing.expect_deprecated(
r"The Query.instances\(\) method is deprecated",
r"Using the Query.instances\(\) method without a context",
):
result = list(
q.options(contains_eager(User.addresses)).instances(
sess.execute(selectquery)
)
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with testing.expect_deprecated(
r"The Query.instances\(\) method is deprecated",
r"Using the Query.instances\(\) method without a context",
):
result = list(
q.options(contains_eager(User.addresses)).instances(
sess.connection().execute(selectquery)
)
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_aliased_instances(self):
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
sess = fixture_session()
q = sess.query(User)
adalias = addresses.alias("adalias")
selectquery = (
users.outerjoin(adalias)
.select()
.order_by(users.c.id, adalias.c.id)
)
# note this has multiple problems because we aren't giving Query
# the statement where it would be able to create an adapter
def go():
with testing.expect_deprecated(
r"Using the Query.instances\(\) method without a context",
r"The Query.instances\(\) method is deprecated and will be "
r"removed in a future release.",
):
result = list(
q.options(
contains_eager(User.addresses, alias=adalias)
).instances(sess.connection().execute(selectquery))
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_alias(self):
orders, items, users, order_items, User = (
self.tables.orders,
self.tables.items,
self.tables.users,
self.tables.order_items,
self.classes.User,
)
Order = self.classes.Order
sess = fixture_session()
q = sess.query(User)
oalias = orders.alias("o1")
ialias = items.alias("i1")
query = (
users.outerjoin(oalias)
.outerjoin(order_items)
.outerjoin(ialias)
.select()
.order_by(users.c.id, oalias.c.id, ialias.c.id)
)
# test using Alias with more than one level deep
# new way:
# from sqlalchemy.orm.strategy_options import Load
# opt = Load(User).contains_eager('orders', alias=oalias).
# contains_eager('items', alias=ialias)
def go():
with testing.expect_deprecated(
r"Using the Query.instances\(\) method without a context",
r"The Query.instances\(\) method is deprecated and will be "
r"removed in a future release.",
):
result = list(
q.options(
contains_eager(User.orders, alias=oalias),
defaultload(User.orders).contains_eager(
Order.items, alias=ialias
),
).instances(sess.connection().execute(query))
)
assert self.static.user_order_result == result
self.assert_sql_count(testing.db, go, 1)
| InstancesTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-coins-heroes-can-collect.py | {
"start": 66,
"end": 796
} | class ____(object):
def maximumCoins(self, heroes, monsters, coins):
"""
:type heroes: List[int]
:type monsters: List[int]
:type coins: List[int]
:rtype: List[int]
"""
idxs1 = range(len(heroes))
idxs1.sort(key=lambda x: heroes[x])
idxs2 = range(len(monsters))
idxs2.sort(key=lambda x: monsters[x])
result = [0]*len(idxs1)
i = curr = 0
for idx in idxs1:
for i in xrange(i, len(idxs2)):
if monsters[idxs2[i]] > heroes[idx]:
break
curr += coins[idxs2[i]]
else:
i = len(idxs2)
result[idx] = curr
return result
| Solution |
python | automl__auto-sklearn | test/fixtures/caching.py | {
"start": 370,
"end": 3742
} | class ____:
"""Used for the below fixtures.
Mainly used with cases so they don' need to be retrained at every invocation.
The cache can be cleared using `pytest`'s built in mechanism:
pytest --clear-cache
To view cached items use:
pytest --cache-show
..code:: python
def case_fitted_model(cache, ...):
key = "some key unique to this test"
cache = cache(key)
if "model" in cache:
return cache.load("model")
# ... fit model
cache.save(model, "model")
return model
If multiple items are required, they can be access in much the same ways
..code:: python
def case_requires_multiple_things(cache, ...):
cache1 = cache("key1")
cache2 = cache("key2")
If multiple things need to be stored in one location, you can access a given path
for a given named thing inside a cache.
..code:: python
def case_fitted_model_and_populated_backend(cache, ...):
cache = cache("some key")
"""
def __init__(self, key: str, cache_dir: Path, verbose: int = 0):
"""
Parameters
----------
key : str
The key of the item stored
cache_dir : Path
The dir where the cache should be located
verbose : int = 0
Whether to be verbose or not. Currently only has one level (> 0)
"""
self.key = key
self.dir = cache_dir / key
self.verbose = verbose > 0
self._lock: FileLock = None
def items(self) -> list[Path]:
"""Get any paths associated to items in this dir"""
return list(self.dir.iterdir())
def __contains__(self, name: str) -> bool:
return self.path(name).exists()
def path(self, name: str) -> Path:
"""Path to an item for this cache"""
return self.dir / name
def load(self, name: str) -> Any:
"""Load an item from the cache with a given name"""
if self.verbose:
print(f"Loading cached item {self.path(name)}")
with self.path(name).open("rb") as f:
return pickle.load(f)
def save(self, item: Any, name: str) -> None:
"""Dump an item to cache with a name"""
if self.verbose:
print(f"Saving cached item {self.path(name)}")
with self.path(name).open("wb") as f:
pickle.dump(item, f)
def reset(self) -> None:
"""Delete this caches items"""
shutil.rmtree(self.dir)
self.dir.mkdir()
def __enter__(self):
if int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", 1)) <= 1:
return self
else:
path = LOCK_DIR / f"{self.key}.lock"
self._lock = FileLock(path)
self._lock.acquire(poll_interval=1.0)
if self.verbose:
print(f"locked cache {path}")
return self
def __exit__(self, *args, **kwargs):
if self._lock is not None:
self._lock.release()
@fixture
def make_cache(request: FixtureRequest) -> Callable[[str], Cache]:
"""Gives the access to a cache."""
pytest_cache = request.config.cache
assert pytest_cache is not None
cache_dir = pytest_cache.mkdir(AUTOSKLEARN_CACHE_NAME)
return partial(Cache, cache_dir=cache_dir)
| Cache |
python | pytorch__pytorch | tools/linter/adapters/clangformat_linter.py | {
"start": 397,
"end": 6824
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: list[str],
*,
timeout: int,
) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: list[str],
*,
retries: int,
timeout: int,
) -> subprocess.CompletedProcess[bytes]:
remaining_retries = retries
while True:
try:
return _run_command(args, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning( # noqa: G200
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def check_file(
filename: str,
binary: str,
retries: int,
timeout: int,
) -> list[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
proc = run_command(
[binary, filename],
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"clang-format timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="See https://clang.llvm.org/docs/ClangFormat.html.\nRun `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with clang-format.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--binary",
required=True,
help="clang-format binary path",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out clang-format",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for clang-format",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
binary = os.path.normpath(args.binary) if IS_WINDOWS else args.binary
if not Path(binary).exists():
lint_message = LintMessage(
path=None,
line=None,
char=None,
code="CLANGFORMAT",
severity=LintSeverity.ERROR,
name="init-error",
original=None,
replacement=None,
description=(
f"Could not find clang-format binary at {binary}, "
"did you forget to run `lintrunner init`?"
),
)
print(json.dumps(lint_message._asdict()), flush=True)
sys.exit(0)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, binary, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| LintMessage |
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/tool_spec/base.py | {
"start": 484,
"end": 3918
} | class ____:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return map from function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
| BaseToolSpec |
python | django__django | django/http/response.py | {
"start": 24802,
"end": 24864
} | class ____(HttpResponse):
status_code = 410
| HttpResponseGone |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/gumroad/tests.py | {
"start": 242,
"end": 926
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = GumroadProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"success": true,
"user": {
"bio": "a sailor, a tailor",
"name": "John Smith",
"twitter_handle": null,
"user_id": "G_-mnBf9b1j9A7a4ub4nFQ==",
"email": "johnsmith@gumroad.com",
"url": "https://gumroad.com/sailorjohn"
}
}""",
)
def get_expected_to_str(self):
return "johnsmith@gumroad.com"
| GumroadTests |
python | sympy__sympy | sympy/stats/rv.py | {
"start": 48084,
"end": 48392
} | class ____:
_argnames: tuple[str, ...] = ()
def __getattr__(self, attr):
try:
return self.args[self._argnames.index(attr)]
except ValueError:
raise AttributeError("'%s' object has no attribute '%s'" % (
type(self).__name__, attr))
| NamedArgsMixin |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 39151,
"end": 39833
} | class ____:
"""
Object representing a model description returned by SageMaker's
"DescribeModel" API: https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeModel.html.
"""
def __init__(self, model, arn):
self.model = model
self.arn = arn
@property
def response_object(self):
return {
"ModelArn": self.arn,
"ModelName": self.model.model_name,
"PrimaryContainer": self.model.primary_container,
"ExecutionRoleArn": self.model.execution_role_arn,
"VpcConfig": self.model.vpc_config or {},
"CreationTime": self.model.creation_time,
}
| ModelDescription |
python | MongoEngine__mongoengine | tests/test_datastructures.py | {
"start": 159,
"end": 425
} | class ____:
def __init__(self):
self._changed_fields = []
self._unset_fields = []
def _mark_as_changed(self, key):
self._changed_fields.append(key)
def _mark_as_unset(self, key):
self._unset_fields.append(key)
| DocumentStub |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 5452,
"end": 5651
} | class ____(SQLRole):
__slots__ = ()
_role_name = (
"Row returning expression such as a SELECT, a FROM clause, or an "
"INSERT/UPDATE/DELETE with RETURNING"
)
| ReturnsRowsRole |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 102279,
"end": 111370
} | class ____(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertLen(g3.get_operations(), 1)
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
  @test_util.run_v1_only("b/120545219")
  def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
    """If every stacked graph builds a function, init_scope uses the global graph."""
    with context.graph_mode():
      ops.reset_default_graph()
      # This doesn't push anything onto the graph stack, but it does
      # set the stack's global graph.
      global_graph = ops.get_default_graph()
      fn_graph = ops.Graph()

      # pylint: disable=protected-access
      fn_graph._building_function = True
      self.assertLen(ops._default_graph_stack.stack, 0)
      with fn_graph.as_default():
        # Entering the function graph pushes exactly one entry.
        self.assertLen(ops._default_graph_stack.stack, 1)
        with ops.init_scope():
          # init_scope pushed the fallback (global) graph on top.
          self.assertGreater(len(ops._default_graph_stack.stack), 1)
          dummy = constant_op.constant(1.0)
        # The fallback graph was popped again on exit.
        self.assertLen(ops._default_graph_stack.stack, 1)
      # Note that the global graph is _not_ on the graph stack.
      self.assertLen(ops._default_graph_stack.stack, 0)

      # Ensure that `dummy` was added to the global graph.
      self.assertEqual(global_graph, dummy.graph)
      # pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertLen(ops._default_graph_stack.stack, 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertLen(ops._default_graph_stack.stack, 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
  def testMixGraphEager(self):
    """Mixing eager tensors into graph construction raises errors."""
    with context.eager_mode():
      # `c` is an EagerTensor created outside any graph.
      c = constant_op.constant(1.0)
      with ops.Graph().as_default():
        # Feeding an EagerTensor to a graph-mode op must fail loudly.
        with self.assertRaisesRegex(RuntimeError,
                                    "Attempting to capture an EagerTensor"):
          math_ops.add(c, c)
        # NOTE(review): presumably `c2` here is a symbolic (graph) tensor and
        # the add below fails for mixing it into the wrong execution context —
        # TODO confirm which conversion path raises the TypeError.
        c2 = constant_op.constant(2.0)
        with self.assertRaises(TypeError):
          math_ops.add(c2, c2)
  def testPreservesNameScopeInEagerExecution(self):
    """Name scopes survive init_scope both eagerly and inside tf.function."""
    with context.eager_mode():

      def foo():
        # Check the scope name both when `foo` runs eagerly and when it is
        # traced into a function graph below.
        with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
          if context.executing_eagerly():
            # A trailing slash is always appended when eager execution is
            # enabled.
            self.assertEqual(context.context().scope_name, "inner/")
          else:
            self.assertEqual(ops.get_name_scope(), "inner")

      foo()
      self.assertEqual(ops.get_name_scope(), "")
      # Tracing `foo` as a tf.function exercises the graph-mode branch.
      foo_compiled = def_function.function(foo)
      foo_compiled()
      self.assertEqual(ops.get_name_scope(), "")
  def testExecutingEagerlyOutsideFunctions(self):
    """executing_eagerly_outside_functions reflects the outer context only."""

    @def_function.function
    def f():
      # Inside a tf.function the *surrounding* context determines the result.
      return ops.executing_eagerly_outside_functions()

    with context.graph_mode():
      self.assertFalse(ops.executing_eagerly_outside_functions())
      with session.Session():
        # Need self.evaluate for these as the return type of functions is
        # tensors.
        self.assertFalse(self.evaluate(f()))

    with context.eager_mode():
      self.assertTrue(ops.executing_eagerly_outside_functions())
      self.assertTrue(f())

      with ops.Graph().as_default():
        # An explicit graph entered from eager still counts as graph mode.
        self.assertFalse(ops.executing_eagerly_outside_functions())
        with session.Session():
          self.assertFalse(self.evaluate(f()))
| InitScopeTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.