language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | langchain-ai__langchain | libs/core/langchain_core/language_models/fake.py | {
"start": 2137,
"end": 3732
} | class ____(FakeListLLM):
"""Fake streaming list LLM for testing purposes.
An LLM that will return responses from a list in order.
This model also supports optionally sleeping between successive
chunks in a streaming implementation.
"""
error_on_chunk_number: int | None = None
"""If set, will raise an exception on the specified chunk number."""
@override
def stream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> Iterator[str]:
result = self.invoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
time.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
@override
async def astream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[str]:
result = await self.ainvoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
await asyncio.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
| FakeStreamingListLLM |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_meta.py | {
"start": 319,
"end": 5071
} | class ____(OrganizationEventsTraceEndpointBase):
url_name = "sentry-api-0-organization-trace-meta"
def client_get(self, data, url=None):
if url is None:
url = self.url
return self.client.get(
url,
data,
format="json",
)
def test_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.login_as(user=user)
url = reverse(
self.url_name,
kwargs={"organization_id_or_slug": org.slug, "trace_id": uuid4().hex},
)
with self.feature(self.FEATURES):
response = self.client.get(
url,
format="json",
)
assert response.status_code == 404, response.content
def test_bad_ids(self) -> None:
# Fake trace id
self.url = reverse(
self.url_name,
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"trace_id": uuid4().hex,
},
)
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
format="json",
)
assert response.status_code == 200, response.content
data = response.data
assert data["errors"] == 0
assert data["performance_issues"] == 0
assert data["span_count"] == 0
assert data["span_count_map"] == {}
assert "uptime_checks" not in data # Should not be present without include_uptime param
# Invalid trace id
with pytest.raises(NoReverseMatch):
self.url = reverse(
self.url_name,
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"trace_id": "not-a-trace",
},
)
def test_simple(self) -> None:
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": -1},
format="json",
)
assert response.status_code == 200, response.content
data = response.data
assert data["errors"] == 0
assert data["performance_issues"] == 2
assert data["span_count"] == 19
assert data["span_count_map"]["http.server"] == 19
def test_no_team(self) -> None:
self.load_trace(is_eap=True)
self.team.delete()
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
format="json",
)
assert response.status_code == 200, response.content
data = response.data
assert data["errors"] == 0
assert data["performance_issues"] == 2
assert data["span_count"] == 19
assert data["span_count_map"]["http.server"] == 19
def test_with_errors(self) -> None:
self.load_trace(is_eap=True)
self.load_errors(self.gen1_project, self.gen1_span_ids[0])
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": -1},
format="json",
)
assert response.status_code == 200, response.content
data = response.data
assert data["errors"] == 3
assert data["performance_issues"] == 2
assert data["span_count"] == 19
assert data["span_count_map"]["http.server"] == 19
def test_with_default(self) -> None:
self.load_trace(is_eap=True)
self.load_default()
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": -1},
format="json",
)
assert response.status_code == 200, response.content
data = response.data
assert data["errors"] == 1
assert data["performance_issues"] == 2
assert data["span_count"] == 19
assert data["span_count_map"]["http.server"] == 19
assert len(data["transaction_child_count_map"]) == 8
def test_with_invalid_date(self) -> None:
self.load_trace(is_eap=True)
self.load_default()
with self.options({"system.event-retention-days": 10}):
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": -1, "timestamp": before_now(days=120).timestamp()},
format="json",
)
assert response.status_code == 400, response.content
| OrganizationEventsTraceMetaEndpointTest |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 28394,
"end": 30326
} | class ____(TimestampedResource):
"""
Object representing a SageMaker transform job. The SageMakerBackend will create
and manage transform jobs.
"""
STATUS_IN_PROGRESS = "InProgress"
STATUS_FAILED = "Failed"
STATUS_COMPLETED = "Completed"
STATUS_STOPPING = "Stopping"
STATUS_STOPPED = "Stopped"
def __init__(
self,
job_name,
model_name,
transform_input,
transform_output,
transform_resources,
data_processing,
tags,
latest_operation,
):
"""
Args:
job_name: The name of the TransformJob.
model_name: The name of the model to associate with the TransformJob.
transform_input: The input data source and the way transform job consumes it.
transform_output: The output results of the transform job.
transform_resources: The ML instance types and instance count to use for the
transform job.
data_processing: The data structure to specify the inference data and associate data
to the prediction results.
tags: Arbitrary tags to associate with the transform job.
latest_operation: The most recent operation that was invoked on the transform job,
represented as an TransformJobOperation object.
"""
super().__init__()
self.job_name = job_name
self.model_name = model_name
self.transform_input = transform_input
self.transform_output = transform_output
self.transform_resources = transform_resources
self.data_processing = data_processing
self.tags = tags
self.latest_operation = latest_operation
@property
def arn_descriptor(self):
return f":transform-job/{self.job_name}"
@property
def status(self):
return self.latest_operation.status()
| TransformJob |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py | {
"start": 2202,
"end": 14071
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
# Increase from 1e-6 to 1e-5
self._atol[dtypes.float32] = 1e-5
self._atol[dtypes.complex64] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._rtol[dtypes.complex64] = 1e-5
super(SquareLinearOperatorBlockLowerTriangularTest, self).setUp()
@staticmethod
def use_blockwise_arg():
return True
@staticmethod
def skip_these_tests():
# Skipping since `LinearOperatorBlockLowerTriangular` is in general not
# self-adjoint.
return ["cholesky", "eigvalsh"]
@staticmethod
def operator_shapes_infos():
shape_info = linear_operator_test_util.OperatorShapesInfo
return [
shape_info((0, 0)),
shape_info((1, 1)),
shape_info((1, 3, 3)),
shape_info((5, 5), blocks=[[(2, 2)], [(3, 2), (3, 3)]]),
shape_info((3, 7, 7),
blocks=[[(1, 2, 2)], [(1, 3, 2), (3, 3, 3)],
[(1, 2, 2), (1, 2, 3), (1, 2, 2)]]),
shape_info((2, 4, 6, 6),
blocks=[[(2, 1, 2, 2)], [(1, 4, 2), (4, 4, 4)]]),
]
def operator_and_matrix(
self, shape_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
expected_blocks = (
shape_info.__dict__["blocks"] if "blocks" in shape_info.__dict__
else [[list(shape_info.shape)]])
matrices = []
for i, row_shapes in enumerate(expected_blocks):
row = []
for j, block_shape in enumerate(row_shapes):
if i == j: # operator is on the diagonal
row.append(
linear_operator_test_util.random_positive_definite_matrix(
block_shape, dtype, force_well_conditioned=True))
else:
row.append(
linear_operator_test_util.random_normal(block_shape, dtype=dtype))
matrices.append(row)
lin_op_matrices = matrices
if use_placeholder:
lin_op_matrices = [[
array_ops.placeholder_with_default(
matrix, shape=None) for matrix in row] for row in matrices]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix( # pylint:disable=g-complex-comprehension
l,
is_square=True,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
for l in row] for row in lin_op_matrices])
# Should be auto-set.
self.assertTrue(operator.is_square)
# Broadcast the shapes.
expected_shape = list(shape_info.shape)
broadcasted_matrices = linear_operator_util.broadcast_matrix_batch_dims(
[op for row in matrices for op in row]) # pylint: disable=g-complex-comprehension
matrices = [broadcasted_matrices[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
for i in range(len(matrices))]
block_lower_triangular_dense = _block_lower_triangular_dense(
expected_shape, matrices)
if not use_placeholder:
block_lower_triangular_dense.set_shape(expected_shape)
return operator, block_lower_triangular_dense
def test_is_x_flags(self):
# Matrix with two positive eigenvalues, 1, and 1.
# The matrix values do not effect auto-setting of the flags.
matrix = [[1., 0.], [1., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix)]],
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_block_lower_triangular_inverse_type(self):
matrix = [[1., 0.], [0., 1.]]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)],
[linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True),
linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)]],
is_non_singular=True,
)
inverse = operator.inverse()
self.assertIsInstance(
inverse,
block_lower_triangular.LinearOperatorBlockLowerTriangular)
self.assertEqual(2, len(inverse.operators))
self.assertEqual(1, len(inverse.operators[0]))
self.assertEqual(2, len(inverse.operators[1]))
def test_tape_safe(self):
operator_1 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[1., 0.], [0., 1.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator_2 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[2., 0.], [1., 0.]]))
operator_3 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[3., 1.], [1., 3.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_self_adjoint=False,
is_positive_definite=True)
diagonal_grads_only = ["diag_part", "trace", "determinant",
"log_abs_determinant"]
self.check_tape_safe(operator, skip_options=diagonal_grads_only)
for y in diagonal_grads_only:
for diag_block in [operator_1, operator_3]:
with backprop.GradientTape() as tape:
grads = tape.gradient(getattr(operator, y)(), diag_block.variables)
for item in grads:
self.assertIsNotNone(item)
def test_convert_variables_to_tensors(self):
operator_1 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[1., 0.], [0., 1.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator_2 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[2., 0.], [1., 0.]]))
operator_3 = linalg.LinearOperatorFullMatrix(
variables_module.Variable([[3., 1.], [1., 3.]]),
is_self_adjoint=True,
is_positive_definite=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_self_adjoint=False,
is_positive_definite=True)
with self.cached_session() as sess:
sess.run([x.initializer for x in operator.variables])
self.check_convert_variables_to_tensors(operator)
def test_is_non_singular_auto_set(self):
# Matrix with two positive eigenvalues, 11 and 8.
# The matrix values do not effect auto-setting of the flags.
matrix = [[11., 0.], [1., 8.]]
operator_1 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_2 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator_3 = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=True)
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]],
is_positive_definite=False, # No reason it HAS to be False...
is_non_singular=None)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
with self.assertRaisesRegex(ValueError, "always non-singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_3]], is_non_singular=False)
operator_4 = linalg.LinearOperatorFullMatrix(
[[1., 0.], [2., 0.]], is_non_singular=False)
# A singular operator off of the main diagonal shouldn't raise
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_4, operator_2]], is_non_singular=True)
with self.assertRaisesRegex(ValueError, "always singular"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[operator_1], [operator_2, operator_4]], is_non_singular=True)
def test_different_dtypes_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))]
]
with self.assertRaisesRegex(TypeError, "same dtype"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_non_square_operator_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False)],
[linalg.LinearOperatorFullMatrix(rng.rand(4, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(4, 4))]
]
with self.assertRaisesRegex(ValueError, "must be square"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_empty_operators_raises(self):
with self.assertRaisesRegex(ValueError, "must be a list of >=1"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([])
def test_operators_wrong_length_raises(self):
with self.assertRaisesRegex(ValueError, "must contain `2` blocks"):
block_lower_triangular.LinearOperatorBlockLowerTriangular([
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))],
[linalg.LinearOperatorFullMatrix(rng.rand(2, 2))
for _ in range(3)]])
def test_operators_mismatched_dimension_raises(self):
operators = [
[linalg.LinearOperatorFullMatrix(rng.rand(3, 3))],
[linalg.LinearOperatorFullMatrix(rng.rand(3, 4)),
linalg.LinearOperatorFullMatrix(rng.rand(3, 3))]
]
with self.assertRaisesRegex(ValueError, "must be the same as"):
block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
def test_incompatible_input_blocks_raises(self):
matrix_1 = array_ops.placeholder_with_default(rng.rand(4, 4), shape=None)
matrix_2 = array_ops.placeholder_with_default(rng.rand(3, 4), shape=None)
matrix_3 = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
operators = [
[linalg.LinearOperatorFullMatrix(matrix_1, is_square=True)],
[linalg.LinearOperatorFullMatrix(matrix_2),
linalg.LinearOperatorFullMatrix(matrix_3, is_square=True)]
]
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
operators)
x = np.random.rand(2, 4, 5).tolist()
msg = ("dimension does not match" if context.executing_eagerly()
else "input structure is ambiguous")
with self.assertRaisesRegex(ValueError, msg):
operator.matmul(x)
def test_composite_gradients(self):
with backprop.GradientTape() as tape:
op1 = linalg.LinearOperatorFullMatrix(rng.rand(4, 4), is_square=True)
op2 = linalg.LinearOperatorFullMatrix(rng.rand(3, 4))
op3 = linalg.LinearOperatorFullMatrix(rng.rand(3, 3), is_square=True)
tape.watch([op1, op2, op3])
operator = block_lower_triangular.LinearOperatorBlockLowerTriangular(
[[op1], [op2, op3]])
x = self.make_x(op1, adjoint=False)
y = op1.matmul(x)
connected_grad, disconnected_grad, composite_grad = tape.gradient(
y, [op1, op3, operator]
)
disconnected_component_grad = composite_grad.operators[1][1].to_dense()
self.assertAllClose(connected_grad.to_dense(),
composite_grad.operators[0][0].to_dense())
self.assertAllClose(disconnected_component_grad,
array_ops.zeros_like(disconnected_component_grad))
self.assertIsNone(disconnected_grad)
if __name__ == "__main__":
linear_operator_test_util.add_tests(
SquareLinearOperatorBlockLowerTriangularTest)
test.main()
| SquareLinearOperatorBlockLowerTriangularTest |
python | PrefectHQ__prefect | src/prefect/utilities/schema_tools/hydration.py | {
"start": 3151,
"end": 3354
} | class ____(HydrationError):
@property
def message(self) -> str:
message = "Invalid JSON"
if self.detail:
message += f": {self.detail}"
return message
| InvalidJSON |
python | dask__dask | dask/array/_array_expr/random.py | {
"start": 36204,
"end": 37059
} | class ____(IO):
_parameters = [
"array",
"chunks",
"_meta",
"_state",
"replace",
"p",
"axis",
"shuffle",
]
_defaults = {"axis": None, "shuffle": None}
_funcname = "da.random.choice-"
@cached_property
def chunks(self):
return self.operand("chunks")
@cached_property
def sizes(self):
return list(product(*self.chunks))
@cached_property
def state_data(self):
return random_state_data(len(self.sizes), self._state)
@cached_property
def _meta(self):
return self.operands("_meta")
def _layer(self) -> dict:
return {
k: (_choice_rs, state, self.array, size, self.replace, self.p)
for k, state, size in zip(self.__dask_keys__(), self.state_data, self.sizes)
}
| RandomChoice |
python | sympy__sympy | doc/ext/docscrape.py | {
"start": 13120,
"end": 14716
} | class ____(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = str(inspect.signature(func))
argspec = argspec.replace('*', r'\*')
signature = '{}{}'.format(func_name, argspec)
except TypeError:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. {}:: {}\n \n\n'.format(roles.get(self._role, ''),
func_name)
out += super().__str__(func_role=self._role)
return out
| FunctionDoc |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 33329,
"end": 33523
} | class ____(Source):
def name(self) -> str:
return ""
def guard_source(self) -> GuardSource:
return GuardSource.GLOBAL
@dataclasses.dataclass(frozen=True)
| GlobalStateSource |
python | viewflow__viewflow | tests/workflow/test_nodes__join.py | {
"start": 141,
"end": 1138
} | class ____(TestCase): # noqa: D101
def test_join_async(self):
process = TestASyncWorkflow.start.run()
first_task = process.task_set.filter(flow_task=TestASyncWorkflow.first).first()
second_task = process.task_set.filter(flow_task=TestASyncWorkflow.second).first()
TestASyncWorkflow.first.run(first_task)
join_task = process.task_set.filter(flow_task=TestASyncWorkflow.join).first()
self.assertEqual(join_task.status, STATUS.STARTED)
TestASyncWorkflow.second.run(second_task)
join_task.refresh_from_db()
self.assertEqual(join_task.status, STATUS.DONE)
process.refresh_from_db()
self.assertEqual(process.status, STATUS.DONE)
self.assertEqual(process.task_set.count(), 6)
def test_join_sync(self):
process = TestSyncWorkflow.start.run()
process.refresh_from_db()
self.assertEqual(process.status, STATUS.DONE)
self.assertEqual(process.task_set.count(), 6)
| Test |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/unique_fragment_names.py | {
"start": 69,
"end": 1002
} | class ____(ValidationRule):
__slots__ = 'known_fragment_names',
def __init__(self, context):
super(UniqueFragmentNames, self).__init__(context)
self.known_fragment_names = {}
def enter_OperationDefinition(self, node, key, parent, path, ancestors):
return False
def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
fragment_name = node.name.value
if fragment_name in self.known_fragment_names:
self.context.report_error(GraphQLError(
self.duplicate_fragment_name_message(fragment_name),
[self.known_fragment_names[fragment_name], node.name]
))
else:
self.known_fragment_names[fragment_name] = node.name
return False
@staticmethod
def duplicate_fragment_name_message(field):
return 'There can only be one fragment named "{}".'.format(field)
| UniqueFragmentNames |
python | ansible__ansible | lib/ansible/module_utils/facts/network/netbsd.py | {
"start": 845,
"end": 1643
} | class ____(GenericBsdIfconfigNetwork):
"""
This is the NetBSD Network Class.
It uses the GenericBsdIfconfigNetwork
"""
platform = 'NetBSD'
def parse_media_line(self, words, current_if, ips):
# example of line:
# $ ifconfig
# ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
# ec_capabilities=1<VLAN_MTU>
# ec_enabled=0
# address: 00:20:91:45:00:78
# media: Ethernet 10baseT full-duplex
# inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2]
if len(words) > 3:
current_if['media_options'] = words[3].split(',')
| NetBSDNetwork |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/interfaces.py | {
"start": 9952,
"end": 23620
} | class ____(GenericBaseModel, ABC, Generic[DatasourceT, PartitionerT]):
"""
A Data Asset is a collection of records within a Data Source, which is usually named based
on the underlying data system and sliced to correspond to a desired specification.
Data Assets are used to specify how Great Expectations will organize data into Batches.
"""
# To subclass a DataAsset one must define `type` as a Class literal explicitly on the sublass
# as well as implementing the methods in the `Abstract Methods` section below.
# Some examples:
# * type: Literal["MyAssetTypeID"] = "MyAssetTypeID",
# * type: Literal["table"] = "table"
# * type: Literal["csv"] = "csv"
name: str
type: str
id: Optional[uuid.UUID] = Field(default=None, description="DataAsset id")
# TODO: order_by should no longer be used and should be removed
order_by: List[Sorter] = Field(default_factory=list)
batch_metadata: BatchMetadata = pydantic.Field(default_factory=dict)
batch_definitions: List[BatchDefinition] = Field(default_factory=list)
# non-field private attributes
_datasource: DatasourceT = pydantic.PrivateAttr()
_data_connector: Optional[DataConnector] = pydantic.PrivateAttr(default=None)
_test_connection_error_message: Optional[str] = pydantic.PrivateAttr(default=None)
@property
def datasource(self) -> DatasourceT:
return self._datasource
def test_connection(self) -> None:
"""Test the connection for the DataAsset.
Raises:
TestConnectionError: If the connection test fails.
"""
raise NotImplementedError(
"""One needs to implement "test_connection" on a DataAsset subclass."""
)
def get_batch_parameters_keys(
self, partitioner: Optional[PartitionerT] = None
) -> tuple[str, ...]:
raise NotImplementedError(
"""One needs to implement "get_batch_parameters_keys" on a DataAsset subclass."""
)
def build_batch_request(
self,
options: Optional[BatchParameters] = None,
batch_slice: Optional[BatchSlice] = None,
partitioner: Optional[PartitionerT] = None,
) -> BatchRequest[PartitionerT]:
"""A batch request that can be used to obtain batches for this DataAsset.
Args:
options: A dict that can be used to filter the batch groups returned from the asset.
The dict structure depends on the asset type. The available keys for dict can be obtained by
calling get_batch_parameters_keys(...).
batch_slice: A python slice that can be used to limit the sorted batches by index.
e.g. `batch_slice = "[-5:]"` will request only the last 5 batches after the options filter is applied.
partitioner: A Partitioner used to narrow the data returned from the asset.
Returns:
A BatchRequest object that can be used to obtain a batch from an asset by calling the
get_batch method.
""" # noqa: E501 # FIXME CoP
raise NotImplementedError(
"""One must implement "build_batch_request" on a DataAsset subclass."""
)
@abstractmethod
def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]: ...
@abstractmethod
def get_batch(self, batch_request: BatchRequest) -> Batch: ...
def _validate_batch_request(self, batch_request: BatchRequest) -> None:
"""Validates the batch_request has the correct form.
Args:
batch_request: A batch request object to be validated.
"""
raise NotImplementedError(
"""One must implement "_validate_batch_request" on a DataAsset subclass."""
)
# End Abstract Methods
def add_batch_definition(
self,
name: str,
partitioner: Optional[PartitionerT] = None,
) -> BatchDefinition[PartitionerT]:
"""Add a BatchDefinition to this DataAsset.
BatchDefinition names must be unique within a DataAsset.
If the DataAsset is tied to a DataContext, the BatchDefinition will be persisted.
Args:
name (str): Name of the new batch definition.
partitioner: Optional Partitioner to partition this BatchDefinition
Returns:
BatchDefinition: The new batch definition.
"""
batch_definition_names = {bc.name for bc in self.batch_definitions}
if name in batch_definition_names:
raise ValueError( # noqa: TRY003 # FIXME CoP
f'"{name}" already exists (all existing batch_definition names are {", ".join(batch_definition_names)})' # noqa: E501 # FIXME CoP
)
# Let mypy know that self.datasource is a Datasource (it is currently bound to MetaDatasource) # noqa: E501 # FIXME CoP
assert isinstance(self.datasource, Datasource)
batch_definition = BatchDefinition[PartitionerT](name=name, partitioner=partitioner)
batch_definition.set_data_asset(self)
self.batch_definitions.append(batch_definition)
self.update_batch_definition_field_set()
if self.datasource.data_context:
try:
batch_definition = self.datasource.add_batch_definition(batch_definition)
except Exception:
self.batch_definitions.remove(batch_definition)
self.update_batch_definition_field_set()
raise
self.update_batch_definition_field_set()
return batch_definition
@public_api
def delete_batch_definition(self, name: str) -> None:
"""Delete a batch definition.
Args:
name (str): Name of the BatchDefinition to delete.
"""
try:
batch_def = self.get_batch_definition(name)
except KeyError as err:
# We collect the names as a list because while we shouldn't have more than 1
# batch definition with the same name, we want to represent it if it does occur.
batch_definition_names = [bc.name for bc in self.batch_definitions]
raise ValueError( # noqa: TRY003 # FIXME CoP
f'"{name}" does not exist. Existing batch_definition names are {batch_definition_names})' # noqa: E501 # FIXME CoP
) from err
self._delete_batch_definition(batch_def)
def _delete_batch_definition(self, batch_definition: BatchDefinition[PartitionerT]) -> None:
# Let mypy know that self.datasource is a Datasource (it is currently bound to MetaDatasource) # noqa: E501 # FIXME CoP
assert isinstance(self.datasource, Datasource)
self.batch_definitions.remove(batch_definition)
if self.datasource.data_context:
try:
self.datasource.delete_batch_definition(batch_definition)
except Exception:
self.batch_definitions.append(batch_definition)
raise
self.update_batch_definition_field_set()
def update_batch_definition_field_set(self) -> None:
"""Ensure that we have __fields_set__ set correctly for batch_definitions to ensure we serialize IFF needed.""" # noqa: E501 # FIXME CoP
has_batch_definitions = len(self.batch_definitions) > 0
if "batch_definitions" in self.__fields_set__ and not has_batch_definitions:
self.__fields_set__.remove("batch_definitions")
elif "batch_definitions" not in self.__fields_set__ and has_batch_definitions:
self.__fields_set__.add("batch_definitions")
@public_api
def get_batch_definition(self, name: str) -> BatchDefinition[PartitionerT]:
"""Get a batch definition.
Args:
name (str): Name of the BatchDefinition to get.
Raises:
KeyError: If the BatchDefinition does not exist.
"""
batch_definitions = [
batch_definition
for batch_definition in self.batch_definitions
if batch_definition.name == name
]
if len(batch_definitions) == 0:
raise KeyError( # noqa: TRY003 # FIXME CoP
f"BatchDefinition {name} not found"
)
elif len(batch_definitions) > 1:
# Our add_batch_definition() method should enforce that different
# batch definitions do not share a name.
raise KeyError( # noqa: TRY003 # FIXME CoP
f"Multiple keys for {name} found"
)
return batch_definitions[0]
def _batch_parameters_are_valid(
self, options: BatchParameters, partitioner: Optional[PartitionerT]
) -> bool:
valid_options = self.get_batch_parameters_keys(partitioner=partitioner)
return set(options.keys()).issubset(set(valid_options))
@pydantic.validator("batch_metadata", pre=True)
def ensure_batch_metadata_is_not_none(cls, value: Any) -> Union[dict, Any]:
"""If batch metadata is None, replace it with an empty dict."""
if value is None:
return {}
return value
def _get_batch_metadata_from_batch_request(
self, batch_request: BatchRequest, ignore_options: Sequence = ()
) -> BatchMetadata:
"""Performs config variable substitution and populates batch parameters for
Batch.metadata at runtime.
"""
batch_metadata = copy.deepcopy(self.batch_metadata)
if not self._datasource.data_context:
raise MissingDataContextError()
config_variables = self._datasource.data_context.config_variables
batch_metadata = _ConfigurationSubstitutor().substitute_all_config_variables(
data=batch_metadata, replace_variables_dict=config_variables
)
batch_metadata.update(
copy.deepcopy(
{k: v for k, v in batch_request.options.items() if k not in ignore_options}
)
)
return batch_metadata
# Sorter methods
@pydantic.validator("order_by", pre=True)
def _order_by_validator(
cls, order_by: Optional[List[Union[Sorter, str, dict]]] = None
) -> List[Sorter]:
if order_by:
raise DataAssetInitializationError(
message="'order_by' is no longer a valid argument. "
"Sorting should be configured in a batch definition."
)
return []
def sort_batches(
self, batch_list: List[Batch], partitioner: PartitionerSortingProtocol
) -> List[Batch]:
"""Sorts batch_list in place in the order configured in this DataAsset.
Args:
batch_list: The list of batches to sort in place.
partitioner: Configuration used to determine sort.
"""
def get_value(key: str) -> Callable[[Batch], Any]:
return lambda bd: bd.metadata[key]
return self._sort_batch_data_list(batch_list, partitioner, get_value)
def sort_legacy_batch_definitions(
self,
legacy_batch_definition_list: List[LegacyBatchDefinition],
partitioner: PartitionerSortingProtocol,
) -> List[LegacyBatchDefinition]:
"""Sorts batch_definition_list in the order configured by the partitioner."""
def get_value(key: str) -> Callable[[LegacyBatchDefinition], Any]:
return lambda bd: bd.batch_identifiers[key]
return self._sort_batch_data_list(legacy_batch_definition_list, partitioner, get_value)
def sort_batch_identifiers_list(
self, batch_identfiers_list: List[dict], partitioner: PartitionerSortingProtocol
) -> List[dict]:
"""Sorts batch_identfiers_list in the order configured by the partitioner."""
def get_value(key: str) -> Callable[[dict], Any]:
return lambda d: d[key]
return self._sort_batch_data_list(batch_identfiers_list, partitioner, get_value)
def _sort_batch_data_list(
self,
batch_data_list: List[_T],
partitioner: PartitionerSortingProtocol,
get_value: Callable[[str], Any],
) -> List[_T]:
"""Sorts batch_data_list in the order configured by the partitioner."""
reverse = not partitioner.sort_ascending
for key in reversed(partitioner.param_names):
try:
batch_data_list = sorted(
batch_data_list,
key=functools.cmp_to_key(
_sort_batch_identifiers_with_none_metadata_values(get_value(key))
),
reverse=reverse,
)
except KeyError as e:
raise KeyError( # noqa: TRY003 # FIXME CoP
f"Trying to sort {self.name}'s batches on key {key}, "
"which isn't available on all batches."
) from e
return batch_data_list
def _sort_batch_identifiers_with_none_metadata_values(
get_val: Callable[[_T], Any],
) -> Callable[[_T, _T], int]:
def _compare_function(a: _T, b: _T) -> int:
a_val = get_val(a)
b_val = get_val(b)
if a_val is not None and b_val is not None:
if a_val < b_val:
return -1
elif a_val > b_val:
return 1
else:
return 0
elif a_val is None and b_val is None:
return 0
elif a_val is None: # b.metadata_val is not None
return -1
else: # b[key] is None
return 1
return _compare_function
# If a Datasource can have more than 1 _DataAssetT, this will need to change.
_DataAssetT = TypeVar("_DataAssetT", bound=DataAsset)
@public_api
| DataAsset |
python | anthropics__anthropic-sdk-python | src/anthropic/types/tool_use_block.py | {
"start": 212,
"end": 331
} | class ____(BaseModel):
id: str
input: Dict[str, object]
name: str
type: Literal["tool_use"]
| ToolUseBlock |
python | huggingface__transformers | src/transformers/models/efficientloftr/image_processing_efficientloftr.py | {
"start": 5140,
"end": 21626
} | class ____(BaseImageProcessor):
r"""
Constructs a EfficientLoFTR image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`Dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `True`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = EfficientLoFTRImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_grayscale: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 480, "width": 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_grayscale = do_grayscale
# Copied from transformers.models.superpoint.image_processing_superpoint.SuperPointImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = get_size_dict(size, default_to_square=False)
return resize(
image,
size=(size["height"], size["width"]),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
# Copied from transformers.models.superglue.image_processing_superglue.SuperGlueImageProcessor.preprocess
def preprocess(
self,
images,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_grayscale: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image pairs to preprocess. Expects either a list of 2 images or a list of list of 2 images list with
pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
`do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
`(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
# Validate and convert the input images into a flattened list of images for all subsequent processing steps.
images = validate_and_format_image_pairs(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_grayscale:
image = convert_to_grayscale(image, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
all_images.append(image)
# Convert back the flattened list of images into a list of pairs of images.
image_pairs = [all_images[i : i + 2] for i in range(0, len(all_images), 2)]
data = {"pixel_values": image_pairs}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_keypoint_matching(
self,
outputs: "EfficientLoFTRKeypointMatchingOutput",
target_sizes: Union[TensorType, list[tuple]],
threshold: float = 0.0,
) -> list[dict[str, torch.Tensor]]:
"""
Converts the raw output of [`EfficientLoFTRKeypointMatchingOutput`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`EfficientLoFTRKeypointMatchingOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` or `List[Tuple[Tuple[int, int]]]`, *optional*):
Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`Tuple[int, int]`) containing the
target size `(height, width)` of each image in the batch. This must be the original image size (before
any processing).
threshold (`float`, *optional*, defaults to 0.0):
Threshold to filter out the matches with low scores.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
of the pair, the matching scores and the matching indices.
"""
if outputs.matches.shape[0] != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
if not all(len(target_size) == 2 for target_size in target_sizes):
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
if isinstance(target_sizes, list):
image_pair_sizes = torch.tensor(target_sizes, device=outputs.matches.device)
else:
if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
raise ValueError(
"Each element of target_sizes must contain the size (h, w) of each image of the batch"
)
image_pair_sizes = target_sizes
keypoints = outputs.keypoints.clone()
keypoints = keypoints * image_pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
keypoints = keypoints.to(torch.int32)
results = []
for keypoints_pair, matches, scores in zip(keypoints, outputs.matches, outputs.matching_scores):
# Filter out matches with low scores
valid_matches = torch.logical_and(scores > threshold, matches > -1)
matched_keypoints0 = keypoints_pair[0][valid_matches[0]]
matched_keypoints1 = keypoints_pair[1][valid_matches[1]]
matching_scores = scores[0][valid_matches[0]]
results.append(
{
"keypoints0": matched_keypoints0,
"keypoints1": matched_keypoints1,
"matching_scores": matching_scores,
}
)
return results
def visualize_keypoint_matching(
self,
images: ImageInput,
keypoint_matching_output: list[dict[str, torch.Tensor]],
) -> list["Image.Image"]:
"""
Plots the image pairs side by side with the detected keypoints as well as the matching between them.
Args:
images (`ImageInput`):
Image pairs to plot. Same as `EfficientLoFTRImageProcessor.preprocess`. Expects either a list of 2
images or a list of list of 2 images list with pixel values ranging from 0 to 255.
keypoint_matching_output (List[Dict[str, torch.Tensor]]]):
A post processed keypoint matching output
Returns:
`List[PIL.Image.Image]`: A list of PIL images, each containing the image pairs side by side with the detected
keypoints as well as the matching between them.
"""
images = validate_and_format_image_pairs(images)
images = [to_numpy_array(image) for image in images]
image_pairs = [images[i : i + 2] for i in range(0, len(images), 2)]
results = []
for image_pair, pair_output in zip(image_pairs, keypoint_matching_output):
height0, width0 = image_pair[0].shape[:2]
height1, width1 = image_pair[1].shape[:2]
plot_image = np.zeros((max(height0, height1), width0 + width1, 3), dtype=np.uint8)
plot_image[:height0, :width0] = image_pair[0]
plot_image[:height1, width0:] = image_pair[1]
plot_image_pil = Image.fromarray(plot_image)
draw = ImageDraw.Draw(plot_image_pil)
keypoints0_x, keypoints0_y = pair_output["keypoints0"].unbind(1)
keypoints1_x, keypoints1_y = pair_output["keypoints1"].unbind(1)
for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(
keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, pair_output["matching_scores"]
):
color = self._get_color(matching_score)
draw.line(
(keypoint0_x, keypoint0_y, keypoint1_x + width0, keypoint1_y),
fill=color,
width=3,
)
draw.ellipse((keypoint0_x - 2, keypoint0_y - 2, keypoint0_x + 2, keypoint0_y + 2), fill="black")
draw.ellipse(
(keypoint1_x + width0 - 2, keypoint1_y - 2, keypoint1_x + width0 + 2, keypoint1_y + 2),
fill="black",
)
results.append(plot_image_pil)
return results
def _get_color(self, score):
"""Maps a score to a color."""
r = int(255 * (1 - score))
g = int(255 * score)
b = 0
return (r, g, b)
__all__ = ["EfficientLoFTRImageProcessor"]
| EfficientLoFTRImageProcessor |
python | huggingface__transformers | src/transformers/pipelines/document_question_answering.py | {
"start": 3743,
"end": 25202
} | class ____(ChunkPipeline):
# TODO: Update task_summary docs to include an example with document QA and then update the first sentence
"""
Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are
similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd
words/boxes) as input instead of text context.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
Example:
```python
>>> from transformers import pipeline
>>> document_qa = pipeline(model="impira/layoutlm-document-qa")
>>> document_qa(
... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
... question="What is the invoice number?",
... )
[{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This document question answering pipeline can currently be loaded from [`pipeline`] using the following task
identifier: `"document-question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a document question answering task.
See the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=document-question-answering).
"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = None
_load_feature_extractor = None
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed
_default_generation_config = GenerationConfig(
max_new_tokens=256,
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.tokenizer is not None and not self.tokenizer.__class__.__name__.endswith("Fast"):
raise ValueError(
"`DocumentQuestionAnsweringPipeline` requires a fast tokenizer, but a slow tokenizer "
f"(`{self.tokenizer.__class__.__name__}`) is provided."
)
if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig":
self.model_type = ModelType.VisionEncoderDecoder
if self.model.config.encoder.model_type != "donut-swin":
raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut")
else:
self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES)
if self.model.config.__class__.__name__ == "LayoutLMConfig":
self.model_type = ModelType.LayoutLM
else:
self.model_type = ModelType.LayoutLMv2andv3
def _sanitize_parameters(
self,
padding=None,
doc_stride=None,
max_question_len=None,
lang: str | None = None,
tesseract_config: str | None = None,
max_answer_len=None,
max_seq_len=None,
top_k=None,
handle_impossible_answer=None,
timeout=None,
**kwargs,
):
preprocess_params, postprocess_params = {}, {}
if padding is not None:
preprocess_params["padding"] = padding
if doc_stride is not None:
preprocess_params["doc_stride"] = doc_stride
if max_question_len is not None:
preprocess_params["max_question_len"] = max_question_len
if max_seq_len is not None:
preprocess_params["max_seq_len"] = max_seq_len
if lang is not None:
preprocess_params["lang"] = lang
if tesseract_config is not None:
preprocess_params["tesseract_config"] = tesseract_config
if timeout is not None:
preprocess_params["timeout"] = timeout
if top_k is not None:
if top_k < 1:
raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
postprocess_params["top_k"] = top_k
if max_answer_len is not None:
if max_answer_len < 1:
raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}")
postprocess_params["max_answer_len"] = max_answer_len
if handle_impossible_answer is not None:
postprocess_params["handle_impossible_answer"] = handle_impossible_answer
forward_params = {}
if getattr(self, "assistant_model", None) is not None:
forward_params["assistant_model"] = self.assistant_model
if getattr(self, "assistant_tokenizer", None) is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, postprocess_params
@overload
def __call__(
self,
image: Union["Image.Image", str],
question: str,
word_boxes: tuple[str, list[float]] | None = None,
**kwargs: Any,
) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: dict[str, Any], **kwargs: Any) -> list[dict[str, Any]]: ...
@overload
def __call__(self, image: list[dict[str, Any]], **kwargs: Any) -> list[list[dict[str, Any]]]: ...
def __call__(
self,
image: Union["Image.Image", str, list[dict[str, Any]]],
question: str | None = None,
word_boxes: tuple[str, list[float]] | None = None,
**kwargs: Any,
) -> dict[str, Any] | list[dict[str, Any]]:
"""
Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an
optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not
provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for
LayoutLM-like models which require them as input. For Donut, no OCR is run.
You can invoke the pipeline several ways:
- `pipeline(image=image, question=question)`
- `pipeline(image=image, question=question, word_boxes=word_boxes)`
- `pipeline([{"image": image, "question": question}])`
- `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])`
Args:
image (`str` or `PIL.Image`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. If given a single image, it can be
broadcasted to multiple questions.
question (`str`):
A question to ask of the document.
word_boxes (`list[str, tuple[float, float, float, float]]`, *optional*):
A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the
pipeline will use these words and boxes instead of running OCR on the image to derive them for models
that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the
pipeline without having to re-run it each time.
top_k (`int`, *optional*, defaults to 1):
The number of answers to return (will be chosen by order of likelihood). Note that we return less than
top_k answers if there are not enough options available within the context.
doc_stride (`int`, *optional*, defaults to 128):
If the words in the document are too long to fit with the question for the model, it will be split in
several chunks with some overlap. This argument controls the size of that overlap.
max_answer_len (`int`, *optional*, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (`int`, *optional*, defaults to 384):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
max_question_len (`int`, *optional*, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Whether or not we accept impossible as an answer.
lang (`str`, *optional*):
Language to use while running OCR. Defaults to english.
tesseract_config (`str`, *optional*):
Additional flags to pass to tesseract while running OCR.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **score** (`float`) -- The probability associated to the answer.
- **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided
`word_boxes`).
- **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided
`word_boxes`).
- **answer** (`str`) -- The answer to the question.
- **words** (`list[int]`) -- The index of each word/box pair that is in the answer
"""
if isinstance(question, str):
inputs = {"question": question, "image": image}
if word_boxes is not None:
inputs["word_boxes"] = word_boxes
else:
inputs = image
return super().__call__(inputs, **kwargs)
def preprocess(
self,
input,
padding="do_not_pad",
doc_stride=None,
max_seq_len=None,
word_boxes: tuple[str, list[float]] | None = None,
lang=None,
tesseract_config="",
timeout=None,
):
# NOTE: This code mirrors the code in question answering and will be implemented in a follow up PR
# to support documents with enough tokens that overflow the model's window
if max_seq_len is None:
max_seq_len = self.tokenizer.model_max_length
if doc_stride is None:
doc_stride = min(max_seq_len // 2, 256)
image = None
image_features = {}
if input.get("image", None) is not None:
image = load_image(input["image"], timeout=timeout)
if self.image_processor is not None:
image_inputs = self.image_processor(images=image, return_tensors="pt")
image_inputs = image_inputs.to(self.dtype)
image_features.update(image_inputs)
elif self.feature_extractor is not None:
image_features.update(self.feature_extractor(images=image, return_tensors="pt"))
elif self.model_type == ModelType.VisionEncoderDecoder:
raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor")
words, boxes = None, None
if self.model_type != ModelType.VisionEncoderDecoder:
if "word_boxes" in input:
words = [x[0] for x in input["word_boxes"]]
boxes = [x[1] for x in input["word_boxes"]]
elif "words" in image_features and "boxes" in image_features:
words = image_features.pop("words")[0]
boxes = image_features.pop("boxes")[0]
elif image is not None:
if not TESSERACT_LOADED:
raise ValueError(
"If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract,"
" but pytesseract is not available"
)
if TESSERACT_LOADED:
words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config)
else:
raise ValueError(
"You must provide an image or word_boxes. If you provide an image, the pipeline will automatically"
" run OCR to derive words and boxes"
)
if self.tokenizer.padding_side != "right":
raise ValueError(
"Document question answering only supports tokenizers whose padding side is 'right', not"
f" {self.tokenizer.padding_side}"
)
if self.model_type == ModelType.VisionEncoderDecoder:
task_prompt = f"<s_docvqa><s_question>{input['question']}</s_question><s_answer>"
# Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py
encoding = {
"inputs": image_features["pixel_values"],
"decoder_input_ids": self.tokenizer(
task_prompt, add_special_tokens=False, return_tensors="pt"
).input_ids,
"return_dict_in_generate": True,
}
yield {
**encoding,
"p_mask": None,
"word_ids": None,
"words": None,
"output_attentions": True,
"is_last": True,
}
else:
tokenizer_kwargs = {}
if self.model_type == ModelType.LayoutLM:
tokenizer_kwargs["text"] = input["question"].split()
tokenizer_kwargs["text_pair"] = words
tokenizer_kwargs["is_split_into_words"] = True
else:
tokenizer_kwargs["text"] = [input["question"]]
tokenizer_kwargs["text_pair"] = [words]
tokenizer_kwargs["boxes"] = [boxes]
encoding = self.tokenizer(
padding=padding,
max_length=max_seq_len,
stride=doc_stride,
return_token_type_ids=True,
truncation="only_second",
return_overflowing_tokens=True,
**tokenizer_kwargs,
)
# TODO: check why slower `LayoutLMTokenizer` and `LayoutLMv2Tokenizer` don't have this key in outputs
# FIXME: ydshieh and/or Narsil
encoding.pop("overflow_to_sample_mapping", None) # We do not use this
num_spans = len(encoding["input_ids"])
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
# This logic mirrors the logic in the question_answering pipeline
p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)]
for span_idx in range(num_spans):
span_encoding = {k: torch.tensor(v[span_idx : span_idx + 1]) for (k, v) in encoding.items()}
if "pixel_values" in image_features:
span_encoding["image"] = image_features["pixel_values"]
input_ids_span_idx = encoding["input_ids"][span_idx]
# keep the cls_token unmasked (some models use it to indicate unanswerable questions)
if self.tokenizer.cls_token_id is not None:
cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
for cls_index in cls_indices:
p_mask[span_idx][cls_index] = 0
# For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000]
# for SEP tokens, and the word's bounding box for words in the original document.
if "boxes" not in tokenizer_kwargs:
bbox = []
for input_id, sequence_id, word_id in zip(
encoding.input_ids[span_idx],
encoding.sequence_ids(span_idx),
encoding.word_ids(span_idx),
):
if sequence_id == 1:
bbox.append(boxes[word_id])
elif input_id == self.tokenizer.sep_token_id:
bbox.append([1000] * 4)
else:
bbox.append([0] * 4)
span_encoding["bbox"] = torch.tensor(bbox).unsqueeze(0)
yield {
**span_encoding,
"p_mask": p_mask[span_idx],
"word_ids": encoding.word_ids(span_idx),
"words": words,
"is_last": span_idx == num_spans - 1,
}
def _forward(self, model_inputs, **generate_kwargs):
p_mask = model_inputs.pop("p_mask", None)
word_ids = model_inputs.pop("word_ids", None)
words = model_inputs.pop("words", None)
is_last = model_inputs.pop("is_last", False)
if self.model_type == ModelType.VisionEncoderDecoder:
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
else:
model_outputs = self.model(**model_inputs)
model_outputs = dict(model_outputs.items())
model_outputs["p_mask"] = p_mask
model_outputs["word_ids"] = word_ids
model_outputs["words"] = words
model_outputs["attention_mask"] = model_inputs.get("attention_mask", None)
model_outputs["is_last"] = is_last
return model_outputs
def postprocess(self, model_outputs, top_k=1, **kwargs):
if self.model_type == ModelType.VisionEncoderDecoder:
answers = [self.postprocess_encoder_decoder_single(o) for o in model_outputs]
else:
answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs)
answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k]
return answers
def postprocess_encoder_decoder_single(self, model_outputs, **kwargs):
sequence = self.tokenizer.batch_decode(model_outputs["sequences"])[0]
# TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer
# (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context).
sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "")
sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
ret = {
"answer": None,
}
answer = re.search(r"<s_answer>(.*)</s_answer>", sequence)
if answer is not None:
ret["answer"] = answer.group(1).strip()
return ret
def postprocess_extractive_qa(
self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs
):
min_null_score = 1000000 # large and positive
answers = []
for output in model_outputs:
words = output["words"]
if output["start_logits"].dtype in (torch.bfloat16, torch.float16):
output["start_logits"] = output["start_logits"].float()
if output["end_logits"].dtype in (torch.bfloat16, torch.float16):
output["end_logits"] = output["end_logits"].float()
starts, ends, scores, min_null_score = select_starts_ends(
start=output["start_logits"],
end=output["end_logits"],
p_mask=output["p_mask"],
attention_mask=output["attention_mask"].numpy()
if output.get("attention_mask", None) is not None
else None,
min_null_score=min_null_score,
top_k=top_k,
handle_impossible_answer=handle_impossible_answer,
max_answer_len=max_answer_len,
)
word_ids = output["word_ids"]
for start, end, score in zip(starts, ends, scores):
word_start, word_end = word_ids[start], word_ids[end]
if word_start is not None and word_end is not None:
answers.append(
{
"score": float(score),
"answer": " ".join(words[word_start : word_end + 1]),
"start": word_start,
"end": word_end,
}
)
if handle_impossible_answer:
answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0})
return answers
| DocumentQuestionAnsweringPipeline |
python | davidhalter__parso | parso/python/tree.py | {
"start": 3259,
"end": 3889
} | class ____:
"""
Some Python specific utilities.
"""
__slots__ = ()
def get_name_of_position(self, position):
"""
Given a (line, column) tuple, returns a :py:class:`Name` or ``None`` if
there is no name at that position.
"""
for c in self.children:
if isinstance(c, Leaf):
if c.type == 'name' and c.start_pos <= position <= c.end_pos:
return c
else:
result = c.get_name_of_position(position)
if result is not None:
return result
return None
| PythonMixin |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 25146,
"end": 26099
} | class ____(Operation):
def call(self, x):
return backend.numpy.arcsinh(x)
def compute_output_spec(self, x):
dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx()))
if dtype == "int64":
dtype = backend.floatx()
else:
dtype = dtypes.result_type(dtype, float)
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=dtype, sparse=sparse)
@keras_export(["keras.ops.arcsinh", "keras.ops.numpy.arcsinh"])
def arcsinh(x):
"""Inverse hyperbolic sine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([1, -1, 0])
>>> keras.ops.arcsinh(x)
array([0.88137364, -0.88137364, 0.0], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Arcsinh().symbolic_call(x)
return backend.numpy.arcsinh(x)
| Arcsinh |
python | pytorch__pytorch | torch/_inductor/distributed_autotune.py | {
"start": 820,
"end": 1240
} | class ____:
"""
State used to track autotuning during a graph_context()
"""
# This is the next operator index. Used to figure out which rank should do
# the autotuning.
autotuned_index: int = 0
# For debugging - used to make sure that we autotune the same number of
# local operators that we expected to.
autotuned_local_count: int = 0
@dataclasses.dataclass
| _DistributedAutotuneState |
python | wandb__wandb | wandb/sdk/lib/service/service_token.py | {
"start": 3673,
"end": 5107
} | class ____(ServiceToken):
"""Connects to the service using TCP over a localhost socket."""
def __init__(self, *, parent_pid: int, port: int) -> None:
self._parent_pid = parent_pid
self._port = port
@override
def connect(
self,
*,
asyncer: asyncio_manager.AsyncioManager,
) -> ServiceClient:
try:
# TODO: This may block indefinitely if the service is unhealthy.
reader, writer = asyncer.run(
lambda: asyncio.open_connection("localhost", self._port),
)
except Exception as e:
raise WandbServiceConnectionError(
f"Failed to connect to service on port {self._port}",
) from e
return ServiceClient(asyncer, reader, writer)
@override
def _as_env_string(self):
return "-".join(
(
_CURRENT_VERSION,
str(self._parent_pid),
"tcp",
"localhost",
str(self._port),
)
)
@staticmethod
def from_env_string(token: str) -> TCPServiceToken | None:
"""Returns a TCP service token parsed from the env var."""
match = _TCP_TOKEN_RE.fullmatch(token)
if not match:
return None
parent_pid, port = match.groups()
return TCPServiceToken(parent_pid=int(parent_pid), port=int(port))
| TCPServiceToken |
python | pytorch__pytorch | torchgen/api/python.py | {
"start": 23440,
"end": 23851
} | class ____:
name: str
type_str: str
is_out_arg: bool
# To pass PyObjects arguments to C++ function (via the lambda wrapper),
# we need first convert PyObjects into simple C++ objects. This work
# is done by PythonArgParser.
# This data model is used to represent the output of PythonArgParser.
# It has 1-1 mapping with PythonArgument in PythonSignature.
@dataclass(frozen=True)
| DispatchLambdaArgument |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/database.py | {
"start": 18563,
"end": 32115
} | class ____(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
instantiated., or uses a passed in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.modules = []
self.finder = finder = resources.finder_for_path(path)
if finder is None:
raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find(LEGACY_METADATA_FILENAME)
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME, path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
r = finder.find('REQUESTED')
self.requested = r is not None
p = os.path.join(path, 'top_level.txt')
if os.path.exists(p):
with open(p, 'rb') as f:
data = f.read().decode('utf-8')
self.modules = data.splitlines()
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
# base_location = os.path.dirname(self.path)
# base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
# if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException('dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
| InstalledDistribution |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 34764,
"end": 35470
} | class ____(FunctionArg):
def __init__(self, name: str):
super().__init__(name)
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> datetime:
if not params or not params.get("start") or not params.get("end"):
raise InvalidFunctionArgument("function called without date range")
try:
ts = datetime.fromtimestamp(int(value), tz=timezone.utc)
except (OverflowError, ValueError):
raise InvalidFunctionArgument(f"{value} is not a timestamp")
if ts < params["start"] or ts > params["end"]:
raise InvalidFunctionArgument("timestamp outside date range")
return ts
| TimestampArg |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/supers.py | {
"start": 228,
"end": 523
} | class ____:
def __init__(self):
self.attribute = _test_source()
def f1(self):
_test_sink(self.attribute)
def f2(self, x):
_test_sink(x)
def f3(self):
return _test_source()
def f4(self):
return "1"
def f5(self, x):
pass
| A |
python | FactoryBoy__factory_boy | factory/django.py | {
"start": 6786,
"end": 6965
} | class ____(declarations.Transformer):
def __init__(self, password, transform=make_password, **kwargs):
super().__init__(password, transform=transform, **kwargs)
| Password |
python | pytest-dev__pytest-asyncio | docs/reference/markers/class_scoped_loop_with_fixture_strict_mode_example.py | {
"start": 96,
"end": 440
} | class ____:
loop: asyncio.AbstractEventLoop
@pytest_asyncio.fixture(loop_scope="class")
async def my_fixture(self):
TestClassScopedLoop.loop = asyncio.get_running_loop()
async def test_runs_is_same_loop_as_fixture(self, my_fixture):
assert asyncio.get_running_loop() is TestClassScopedLoop.loop
| TestClassScopedLoop |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 38153,
"end": 39095
} | class ____:
"""
Represents the information of a database.
Atributes:
name (str): The name of the database.
properties (dict): The properties of the database.
Example:
DatabaseInfo(name="test_db", id=1, properties={"key": "value"})
"""
@property
def name(self) -> str:
return self._name
@property
def properties(self) -> Dict:
return self._properties
def __init__(self, info: Any) -> None:
self._name = info.db_name
self._properties = {}
for p in info.properties:
self.properties[p.key] = p.value
def __str__(self) -> str:
return f"DatabaseInfo(name={self.name}, properties={self.properties})"
def to_dict(self) -> Dict[str, Any]:
"""Converts the DatabaseInfo instance to a dictionary."""
result = {"name": self.name}
result.update(self.properties)
return result
| DatabaseInfo |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_table_column_count_to_be_between.py | {
"start": 2252,
"end": 13634
} | class ____(BatchExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectTableColumnCountToBeBetween is a \
Batch Expectation.
BatchExpectations are one of the most common types of Expectation.
They are evaluated for an entire Batch, and answer a semantic question about the Batch itself.
Args:
min_value (int or None): {MIN_VALUE_DESCRIPTION}
max_value (int or None): {MAX_VALUE_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable columns \
has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable columns \
has no maximum.
See Also:
[ExpectTableColumnCountToEqual](https://greatexpectations.io/expectations/expect_table_column_count_to_equal)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1.00 2
1 2.30 5
2 4.33 0
Code Examples:
Passing Case:
Input:
ExpectTableColumnCountToBeBetween(
min_value=1,
max_value=3
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 2
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectTableColumnCountToBeBetween(
min_value=3
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 2
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
min_value: Optional[Comparable] = pydantic.Field(description=MIN_VALUE_DESCRIPTION)
max_value: Optional[Comparable] = pydantic.Field(description=MAX_VALUE_DESCRIPTION)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "table expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("table.column_count",)
success_keys = (
"min_value",
"max_value",
)
args_keys = (
"min_value",
"max_value",
)
class Config:
title = "Expect table column count to be between"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectTableColumnCountToBeBetween]
) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
@override
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.min_value and not params.max_value:
template_str = "May have any number of columns."
else:
at_least_str: str = "greater than or equal to"
if params.strict_min:
at_least_str = cls._get_strict_min_string(
renderer_configuration=renderer_configuration
)
at_most_str: str = "less than or equal to"
if params.strict_max:
at_most_str = cls._get_strict_max_string(
renderer_configuration=renderer_configuration
)
if params.min_value and params.max_value:
template_str = (
f"Must have {at_least_str} $min_value and {at_most_str} $max_value columns."
)
elif not params.min_value:
template_str = f"Must have {at_most_str} $max_value columns."
else:
template_str = f"Must have {at_least_str} $min_value columns."
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@override
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer( # type: ignore[override] # TODO: Fix this type ignore
cls,
configuration: ExpectationConfiguration,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["min_value", "max_value", "strict_min", "strict_max"],
)
if params["min_value"] is None and params["max_value"] is None:
template_str = "May have any number of columns."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = (
f"Must have {at_least_str} $min_value and {at_most_str} $max_value columns."
)
elif params["min_value"] is None:
template_str = f"Must have {at_most_str} $max_value columns."
elif params["max_value"] is None:
template_str = f"Must have {at_least_str} $min_value columns."
else:
raise ValueError("unresolvable template_str") # noqa: TRY003 # FIXME CoP
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
return self._validate_metric_value_between(
metric_name="table.column_count",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| ExpectTableColumnCountToBeBetween |
python | jina-ai__jina | jina/logging/formatter.py | {
"start": 673,
"end": 1558
} | class ____(Formatter):
"""Format the log message as a JSON object so that it can be later used/parsed in browser with javascript."""
KEYS = {
'created',
'filename',
'funcName',
'levelname',
'lineno',
'msg',
'module',
'name',
'pathname',
'process',
'thread',
'processName',
'threadName',
'log_id',
} #: keys to extract from the log
def format(self, record: 'LogRecord'):
"""
Format the log message as a JSON object.
:param record: A LogRecord object.
:return:: LogRecord with JSON format.
"""
cr = copy(record)
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))
return json.dumps(
{k: getattr(cr, k) for k in self.KEYS if hasattr(cr, k)}, sort_keys=True
)
| JsonFormatter |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 33937,
"end": 36384
} | class ____(ContextWrappingVariable):
@staticmethod
def create(
func: torch.amp.autocast_mode.autocast,
args: Sequence[Any],
kwargs: dict[str, Any],
) -> "AutocastModeVariable":
assert func in [
torch.amp.autocast_mode.autocast,
torch.cuda.amp.autocast,
torch.cpu.amp.autocast,
]
# device_type : str,
# dtype : Optional[_dtype] = None,
# enabled : bool = True,
# cache_enabled : Optional[bool] = None):cache_enabled
bound_args = inspect.signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
target_values = []
kwargs.clear()
for key in ["device_type", "dtype", "enabled", "cache_enabled"]:
if key == "device_type" and func in [
torch.cuda.amp.autocast,
torch.cpu.amp.autocast,
]:
arg = "cuda" if func is torch.cuda.amp.autocast else "cpu"
else:
arg = bound_args.arguments[key]
if isinstance(arg, VariableTracker):
target_values.append(arg.as_python_constant())
else:
target_values.append(arg)
var = AutocastModeVariable(target_values, initial_values=None, **kwargs)
return var
def __init__(
self,
target_values: Sequence[Any],
initial_values: Optional[Any] = None,
**kwargs: Any,
) -> None:
super().__init__(
target_values=target_values, initial_values=initial_values, **kwargs
)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup_assert()
tx.output.create_node(
"call_function", torch.amp._exit_autocast, (self.proxy,), {}
)
return variables.ConstantVariable.create(None)
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
ctx = torch.amp._enter_autocast(*self.target_values)
self.set_cleanup_hook(tx, lambda: torch.amp._exit_autocast(ctx))
self.proxy = tx.output.create_node(
"call_function", torch.amp._enter_autocast, (*self.target_values,), {}
)
return variables.ConstantVariable.create(None)
def module_name(self) -> str:
return "torch.amp.autocast_mode"
def fn_name(self) -> str:
return "autocast"
| AutocastModeVariable |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt_tests/cloud_v2/test_asset_decorator.py | {
"start": 1463,
"end": 2270
} | class ____(DagsterDbtTranslator):
# DagsterDbtTranslator doesn't have a `get_asset_spec` method yet.
def get_metadata(self, dbt_resource_props: Mapping[str, Any]) -> Mapping[str, Any]:
default_metadata = super().get_metadata(dbt_resource_props)
return {**default_metadata, "custom": "metadata"}
def test_asset_defs_with_custom_metadata(
workspace: DbtCloudWorkspace,
fetch_workspace_data_api_mocks: responses.RequestsMock,
) -> None:
@dbt_cloud_assets(workspace=workspace, dagster_dbt_translator=MyCustomTranslator())
def my_dbt_cloud_assets(): ...
assets_def_specs = list(my_dbt_cloud_assets.specs)
asset_spec = next(iter(assets_def_specs))
assert "custom" in asset_spec.metadata
assert asset_spec.metadata["custom"] == "metadata"
| MyCustomTranslator |
python | ansible__ansible | lib/ansible/galaxy/collection/gpg.py | {
"start": 5460,
"end": 5812
} | class ____(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
- 2 :: Expected a packet but did not find one.
- 3 :: Invalid packet found, this may indicate a non OpenPGP
message.
- 4 :: Signature expected but not found.
"""
what: str
@dataclass(frozen=True, slots=True)
| GpgNoData |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 891,
"end": 954
} | class ____(Generic[_InstanceType, _TypeArgType1]):
pass
| KindN |
python | huggingface__transformers | src/transformers/models/roberta/tokenization_roberta.py | {
"start": 1059,
"end": 7805
} | class ____(TokenizersBackend):
r"""
Construct a RoBERTa tokenizer (backed by HuggingFace's tokenizers library). Based on Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import RobertaTokenizer
>>> tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")
>>> tokenizer("Hello world")["input_ids"]
[0, 31414, 232, 2]
>>> tokenizer(" Hello world")["input_ids"]
[0, 20920, 232, 2]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
merges (`list`, *optional*):
Custom merges list. If not provided, merges are loaded from merges_file.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
errors: str = "replace",
bos_token: str = "<s>",
eos_token: str = "</s>",
sep_token: str = "</s>",
cls_token: str = "<s>",
unk_token: str = "<unk>",
pad_token: str = "<pad>",
mask_token: str = "<mask>",
add_prefix_space: bool = False,
trim_offsets: bool = True,
vocab: Optional[dict] = None,
merges: Optional[list] = None,
**kwargs,
):
self.add_prefix_space = add_prefix_space
self.trim_offsets = trim_offsets
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(pad_token): 0,
str(unk_token): 1,
str(cls_token): 2,
str(sep_token): 3,
str(mask_token): 4,
}
if merges is not None:
self._merges = [tuple(merge) if isinstance(merge, list) else merge for merge in merges]
else:
self._merges = []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
self._tokenizer.post_processor = processors.RobertaProcessing(
sep=(str(sep_token), self._vocab.get(str(sep_token), 3)),
cls=(str(cls_token), self._vocab.get(str(cls_token), 2)),
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
**kwargs,
)
__all__ = ["RobertaTokenizer"]
| RobertaTokenizer |
python | pytorch__pytorch | test/test_sympy_utils.py | {
"start": 33503,
"end": 34464
} | class ____(TestCase):
def test_expand_identity(self):
"""
Test removing an identity via expansion.
"""
x = sympy.Symbol("x")
arg = x + sympy.S.One
expr = Identity(arg)
expanded = expr.expand(identity=True)
self.assertEqual(expanded.count(Identity), 0)
self.assertEqual(expanded, arg)
def test_cast_identity_int(self):
num = 1
expr = Identity(num)
self.assertEqual(num, int(expr))
def test_cast_identity_float(self):
num = 1.1
expr = Identity(num)
self.assertEqual(num, float(expr))
def test_cast_identity_illegal(self):
sym = Identity(sympy.Symbol("x"))
self.assertRaises(TypeError, int, sym)
self.assertRaises(TypeError, float, sym)
tup = (0, 1, 2)
tup_I = Identity(tup)
self.assertRaises(TypeError, int, tup_I)
self.assertRaises(TypeError, float, tup_I)
| TestIdentity |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol9.py | {
"start": 686,
"end": 751
} | class ____(Protocol):
def method1(self) -> "ProtoA": ...
| ProtoA |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 7097,
"end": 8777
} | class ____:
async def test_runner_respects_limit_setting(self):
runner = Runner()
assert runner.limit == PREFECT_RUNNER_PROCESS_LIMIT.value()
runner = Runner(limit=50)
assert runner.limit == 50
with temporary_settings({PREFECT_RUNNER_PROCESS_LIMIT: 100}):
runner = Runner()
assert runner.limit == 100
async def test_runner_limit_can_be_none(self):
runner = Runner(limit=None)
assert runner.limit is None
# Be extra sure that the limiter is not initialized
assert runner._limiter is None
assert runner._acquire_limit_slot("foobar") is True
async def test_runner_respects_poll_setting(self):
runner = Runner()
assert runner.query_seconds == PREFECT_RUNNER_POLL_FREQUENCY.value()
runner = Runner(query_seconds=50)
assert runner.query_seconds == 50
with temporary_settings({PREFECT_RUNNER_POLL_FREQUENCY: 100}):
runner = Runner()
assert runner.query_seconds == 100
async def test_runner_respects_heartbeat_setting(self):
runner = Runner()
assert runner.heartbeat_seconds == PREFECT_RUNNER_HEARTBEAT_FREQUENCY.value()
assert runner.heartbeat_seconds is None
with pytest.raises(
ValueError, match="Heartbeat must be 30 seconds or greater."
):
Runner(heartbeat_seconds=29)
runner = Runner(heartbeat_seconds=50)
assert runner.heartbeat_seconds == 50
with temporary_settings({PREFECT_RUNNER_HEARTBEAT_FREQUENCY: 100}):
runner = Runner()
assert runner.heartbeat_seconds == 100
| TestInit |
python | scipy__scipy | scipy/signal/tests/test_fir_filter_design.py | {
"start": 5332,
"end": 13058
} | class ____:
"""Different author, different style, different tests..."""
def test_lowpass(self, xp):
width = 0.04
ntaps, beta = kaiserord(120, width)
cutoff = xp.asarray(0.5)
kwargs = dict(cutoff=cutoff, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
# Check the gain at a few samples where
# we know it should be approximately 0 or 1.
freq_samples = xp.asarray([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freq_samples = _xp_copy_to_numpy(freq_samples)
freqs, response = freqz(_xp_copy_to_numpy(taps), worN=np.pi*freq_samples)
assert_array_almost_equal(
xp.abs(xp.asarray(response)),
xp.asarray([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5
)
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
xp_assert_close(taps, taps_str)
def test_highpass(self, xp):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
cutoff = xp.asarray(0.5)
kwargs = dict(cutoff=cutoff, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
# Check the gain at a few samples where
# we know it should be approximately 0 or 1.
freq_samples = xp.asarray([0.0, 0.25, 0.5 - width/2, 0.5 + width/2, 0.75, 1.0])
freq_samples = _xp_copy_to_numpy(freq_samples)
freqs, response = freqz(_xp_copy_to_numpy(taps), worN=np.pi*freq_samples)
assert_array_almost_equal(xp.abs(xp.asarray(response)),
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0]), decimal=5)
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
xp_assert_close(taps, taps_str)
def test_bandpass(self, xp):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(
cutoff=xp.asarray([0.3, 0.7]), window=('kaiser', beta), scale=False
)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
# Check the gain at a few samples where
# we know it should be approximately 0 or 1.
freq_samples = xp.asarray([0.0, 0.2, 0.3 - width/2, 0.3 + width/2, 0.5,
0.7 - width/2, 0.7 + width/2, 0.8, 1.0])
freq_samples = _xp_copy_to_numpy(freq_samples)
freqs, response = freqz(_xp_copy_to_numpy(taps), worN=np.pi*freq_samples)
assert_array_almost_equal(xp.abs(xp.asarray(response)),
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5)
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
xp_assert_close(taps, taps_str)
def test_bandstop_multi(self, xp):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=xp.asarray([0.2, 0.5, 0.8]), window=('kaiser', beta),
scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
# Check the gain at a few samples where
# we know it should be approximately 0 or 1.
freq_samples = xp.asarray([0.0, 0.1, 0.2 - width/2, 0.2 + width/2, 0.35,
0.5 - width/2, 0.5 + width/2, 0.65,
0.8 - width/2, 0.8 + width/2, 0.9, 1.0])
freq_samples = _xp_copy_to_numpy(freq_samples)
freqs, response = freqz(_xp_copy_to_numpy(taps), worN=np.pi*freq_samples)
assert_array_almost_equal(
xp.abs(xp.asarray(response)),
xp.asarray([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]),
decimal=5
)
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
xp_assert_close(taps, taps_str)
def test_fs_nyq(self, xp):
"""Test the fs and nyq keywords."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=xp.asarray([300, 700]), window=('kaiser', beta),
pass_zero=False, scale=False, fs=2*nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
# Check the gain at a few samples where
# we know it should be approximately 0 or 1.
freq_samples = xp.asarray([0.0, 200, 300 - width/2, 300 + width/2, 500,
700 - width/2, 700 + width/2, 800, 1000])
freq_samples = _xp_copy_to_numpy(freq_samples)
freqs, response = freqz(
_xp_copy_to_numpy(taps), worN=np.pi*freq_samples/nyquist
)
assert_array_almost_equal(xp.abs(xp.asarray(response)),
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5)
def test_array_cutoff(self, xp):
taps = firwin(3, xp.asarray([.1, .2]))
# smoke test against the value computed by scipy==1.5.2
xp_assert_close(
taps, xp.asarray([-0.00801395, 1.0160279, -0.00801395]), atol=1e-8
)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
def test_bad_pass_zero(self):
"""Test degenerate pass_zero cases."""
with assert_raises(ValueError, match="^Parameter pass_zero='foo' not in "):
firwin(41, 0.5, pass_zero='foo')
with assert_raises(ValueError, match="^Parameter pass_zero=1.0 not in "):
firwin(41, 0.5, pass_zero=1.)
for pass_zero in ('lowpass', 'highpass'):
with assert_raises(ValueError, match='cutoff must have one'):
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
for pass_zero in ('bandpass', 'bandstop'):
with assert_raises(ValueError, match='must have at least two'):
firwin(41, [0.5], pass_zero=pass_zero)
def test_fs_validation(self):
with pytest.raises(ValueError, match="Sampling.*single scalar"):
firwin2(51, .5, 1, fs=np.array([10, 20]))
@make_xp_test_case(firwin2)
| TestFirWinMore |
python | ray-project__ray | python/ray/data/_internal/logical/operators/from_operators.py | {
"start": 501,
"end": 2656
} | class ____(LogicalOperator, SourceOperator, metaclass=abc.ABCMeta):
"""Abstract logical operator for `from_*`."""
def __init__(
self,
input_blocks: List[ObjectRef[Block]],
input_metadata: List[BlockMetadataWithSchema],
):
super().__init__(
name=self.__class__.__name__,
input_dependencies=[],
num_outputs=len(input_blocks),
)
assert len(input_blocks) == len(input_metadata), (
len(input_blocks),
len(input_metadata),
)
# `owns_blocks` is False because this op may be shared by multiple Datasets.
self._input_data = [
RefBundle(
[(input_blocks[i], input_metadata[i])],
owns_blocks=False,
schema=input_metadata[i].schema,
)
for i in range(len(input_blocks))
]
@property
def input_data(self) -> List[RefBundle]:
return self._input_data
def output_data(self) -> Optional[List[RefBundle]]:
return self._input_data
@functools.cached_property
def _cached_output_metadata(self) -> BlockMetadata:
return BlockMetadata(
num_rows=self._num_rows(),
size_bytes=self._size_bytes(),
input_files=None,
exec_stats=None,
)
def _num_rows(self):
if all(bundle.num_rows() is not None for bundle in self._input_data):
return sum(bundle.num_rows() for bundle in self._input_data)
else:
return None
def _size_bytes(self):
metadata = [m for bundle in self._input_data for m in bundle.metadata]
if all(m.size_bytes is not None for m in metadata):
return sum(m.size_bytes for m in metadata)
else:
return None
def infer_metadata(self) -> BlockMetadata:
return self._cached_output_metadata
def infer_schema(self):
return unify_ref_bundles_schema(self._input_data)
def is_lineage_serializable(self) -> bool:
# This operator isn't serializable because it contains ObjectRefs.
return False
| AbstractFrom |
python | getsentry__sentry | src/sentry/projectoptions/manager.py | {
"start": 956,
"end": 2822
} | class ____:
"""Project options used to be implemented in a relatively ad-hoc manner
in the past. The project manager still uses the functionality of the
project model and just dispatches to it.
Options can be used without declaring defaults, but if defaults are
declared they are returned without having to define a default at the
time of the option lookup.
"""
def __init__(self):
self.registry = {}
def lookup_well_known_key(self, key):
return self.registry.get(key)
def freeze_option_epoch(self, project, force=False):
# The options are frozen in a receiver hook for project saves.
# See `sentry.receivers.core.freeze_option_epoch_for_project`
if force or project.get_option("sentry:option-epoch") is None:
from .defaults import LATEST_EPOCH
project.update_option("sentry:option-epoch", LATEST_EPOCH)
def set(self, project, key, value):
from sentry.models.options.project_option import ProjectOption
return ProjectOption.objects.set_value(project, key, value)
def isset(self, project, key):
return project.get_option(key, default=Ellipsis) is not Ellipsis
def get(self, project, key, default=None, validate=None):
return project.get_option(key, default=default, validate=validate)
def delete(self, project, key):
from sentry.models.options.project_option import ProjectOption
return ProjectOption.objects.unset_value(project, key)
def register(self, key, default=None, epoch_defaults=None):
self.registry[key] = WellKnownProjectOption(
key=key, default=default, epoch_defaults=epoch_defaults
)
def all(self):
"""
Return an iterator for all keys in the registry.
"""
return self.registry.values()
| ProjectOptionsManager |
python | TheAlgorithms__Python | data_structures/binary_tree/binary_tree_path_sum.py | {
"start": 250,
"end": 504
} | class ____:
"""
A Node has value variable and pointers to Nodes to its left and right.
"""
def __init__(self, value: int) -> None:
self.value = value
self.left: Node | None = None
self.right: Node | None = None
| Node |
python | pydata__xarray | xarray/util/deprecation_helpers.py | {
"start": 5538,
"end": 7982
} | class ____:
"""Object that handles deprecation cycle for kwarg default values.
Similar to ReprObject
"""
_old: str
_new: str | None
_name: str
def __init__(self, *, name: str, old: str, new: str | None):
self._name = name
self._old = old
self._new = new
def __repr__(self) -> str:
return str(self._value)
def __eq__(self, other: Self | Any) -> bool:
return (
self._value == other._value
if isinstance(other, type(self))
else self._value == other
)
@property
def _value(self) -> str | None:
return self._new if OPTIONS["use_new_combine_kwarg_defaults"] else self._old
def __hash__(self) -> int:
return hash(self._value)
def __dask_tokenize__(self) -> object:
from dask.base import normalize_token
return normalize_token((type(self), self._value))
def warning_message(self, message: str, recommend_set_options: bool = True) -> str:
if recommend_set_options:
recommendation = (
" To opt in to new defaults and get rid of these warnings now "
"use `set_options(use_new_combine_kwarg_defaults=True) or "
f"set {self._name} explicitly."
)
else:
recommendation = (
f" The recommendation is to set {self._name} explicitly for this case."
)
return (
f"In a future version of xarray the default value for {self._name} will "
f"change from {self._name}={self._old!r} to {self._name}={self._new!r}. "
+ message
+ recommendation
)
def error_message(self) -> str:
return (
f" Error might be related to new default (`{self._name}={self._new!r}`). "
f"Previously the default was `{self._name}={self._old!r}`. "
f"The recommendation is to set {self._name!r} explicitly for this case."
)
_DATA_VARS_DEFAULT = CombineKwargDefault(name="data_vars", old="all", new=None)
_COORDS_DEFAULT = CombineKwargDefault(name="coords", old="different", new="minimal")
_COMPAT_CONCAT_DEFAULT = CombineKwargDefault(
name="compat", old="equals", new="override"
)
_COMPAT_DEFAULT = CombineKwargDefault(name="compat", old="no_conflicts", new="override")
_JOIN_DEFAULT = CombineKwargDefault(name="join", old="outer", new="exact")
| CombineKwargDefault |
python | agronholm__apscheduler | tests/test_marshalling.py | {
"start": 569,
"end": 619
} | class ____(DummyClass):
pass
| InheritedDummyClass |
python | optuna__optuna | optuna/samplers/_lazy_random_state.py | {
"start": 57,
"end": 729
} | class ____:
"""Lazy Random State class.
This is a class to initialize the random state just before use to prevent
duplication of the same random state when deepcopy is applied to the instance of sampler.
"""
def __init__(self, seed: int | None = None) -> None:
self._rng: np.random.RandomState | None = None
if seed is not None:
self.rng.seed(seed=seed)
def _set_rng(self) -> None:
self._rng = np.random.RandomState()
@property
def rng(self) -> np.random.RandomState:
if self._rng is None:
self._set_rng()
assert self._rng is not None
return self._rng
| LazyRandomState |
python | viewflow__viewflow | viewflow/workflow/flow/views/list.py | {
"start": 4901,
"end": 5547
} | class ____(WorkflowTaskListView):
"""List of current user assigned tasks from all viewset registered flows."""
columns = ("task_id", "flow_task", "brief", "process_brief", "created")
bulk_actions = (
Action(name=_("Unassign selected tasks"), viewname="tasks_unassign"),
)
title = _("Inbox")
filterset_class = filters.FlowUserTaskListFilter
@viewprop
def queryset(self):
"""List of tasks assigned to the current user."""
return self.model._default_manager.inbox(self.flow_classes, self.request.user)
def created(self, task):
return timesince(task.created)
| WorkflowInboxListView |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9989
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl.marker.colorbar"
_path_str = "scatterpolargl.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolargl
.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | h5py__h5py | h5py/_hl/base.py | {
"start": 8297,
"end": 11537
} | class ____(CommonStateObject):
"""
Base class for high-level interface objects.
"""
@property
def file(self):
""" Return a File instance associated with this object """
from . import files
with phil:
return files.File(self.id)
@property
@with_phil
def name(self):
""" Return the full name of this object. None if anonymous. """
return self._d(h5i.get_name(self.id))
@property
@with_phil
def parent(self):
"""Return the parent group of this object.
This is always equivalent to obj.file[posixpath.dirname(obj.name)].
ValueError if this object is anonymous.
"""
if self.name is None:
raise ValueError("Parent of an anonymous object is undefined")
return self.file[posixpath.dirname(self.name)]
@property
@with_phil
def id(self):
""" Low-level identifier appropriate for this object """
return self._id
@property
@with_phil
def ref(self):
""" An (opaque) HDF5 reference to this object """
return h5r.create(self.id, b'.', h5r.OBJECT)
@property
@with_phil
def regionref(self):
"""Create a region reference (Datasets only).
The syntax is regionref[<slices>]. For example, dset.regionref[...]
creates a region reference in which the whole dataset is selected.
Can also be used to determine the shape of the referenced dataset
(via .shape property), or the shape of the selection (via the
.selection property).
"""
return _RegionProxy(self)
@property
def attrs(self):
""" Attributes attached to this object """
from . import attrs
with phil:
return attrs.AttributeManager(self)
@with_phil
def __init__(self, oid):
""" Setup this object, given its low-level identifier """
self._id = oid
@with_phil
def __hash__(self):
return hash(self.id)
@with_phil
def __eq__(self, other):
if hasattr(other, 'id'):
return self.id == other.id
return NotImplemented
def __bool__(self):
with phil:
return bool(self.id)
def __getnewargs__(self):
"""Disable pickle.
Handles for HDF5 objects can't be reliably deserialised, because the
recipient may not have access to the same files. So we do this to
fail early.
If you really want to pickle h5py objects and can live with some
limitations, look at the h5pickle project on PyPI.
"""
raise TypeError("h5py objects cannot be pickled")
def __getstate__(self):
# Pickle protocols 0 and 1 use this instead of __getnewargs__
raise TypeError("h5py objects cannot be pickled")
# --- Dictionary-style interface ----------------------------------------------
# To implement the dictionary-style interface from groups and attributes,
# we inherit from the appropriate abstract base classes in collections.
#
# All locking is taken care of by the subclasses.
# We have to override ValuesView and ItemsView here because Group and
# AttributeManager can only test for key names.
| HLObject |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 13476,
"end": 13819
} | class ____(VegaLiteSchema):
"""
Root schema wrapper.
A Vega-Lite top-level specification. This is the root class for all Vega-Lite
specifications. (The json schema is generated from this type.)
"""
_schema = VegaLiteSchema._rootschema
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| Root |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/property_with_parameters.py | {
"start": 730,
"end": 1063
} | class ____:
@property
def attribute_var_args(self, *args): # [property-with-parameters]
return sum(args)
@property
def attribute_var_kwargs(self, **kwargs): #[property-with-parameters]
return {key: value * 2 for key, value in kwargs.items()}
from functools import cached_property
| VariadicParameters |
python | ApeWorX__ape | src/ape/contracts/base.py | {
"start": 64359,
"end": 66380
} | class ____:
"""
A class that bridges contract containers in a namespace.
For example, if you have an interface structure like this::
contracts:
accounts:
- interface.json
mocks:
- interface.json
You can interact with them like this::
account_interface = project.accounts.interface
mock_interface = project.mocks.interface
"""
def __init__(self, name: str, contracts: list[ContractContainer]):
self.name = name
self.contracts = contracts
@log_instead_of_fail(default="<ContractNamespace>")
def __repr__(self) -> str:
return f"<{self.name}>"
@only_raise_attribute_error
def __getattr__(self, item: str) -> Union[ContractContainer, "ContractNamespace"]:
"""
Access the next contract container or namespace.
Args:
item (str): The name of the next node.
Returns:
Union[:class:`~ape.contracts.base.ContractContainer`,
:class:`~ape.contracts.base.ContractNamespace`]
"""
_assert_not_ipython_check(item)
def _get_name(cc: ContractContainer) -> str:
return cc.contract_type.name or ""
for contract in self.contracts:
search_contract_name = _get_name(contract)
search_name = (
search_contract_name.replace(f"{self.name}.", "") if search_contract_name else None
)
if not search_name:
continue
elif search_name == item:
return contract
elif "." in search_name:
next_node = search_name.partition(".")[0]
if next_node != item:
continue
subname = f"{self.name}.{next_node}"
subcontracts = [c for c in self.contracts if _get_name(c).startswith(subname)]
return ContractNamespace(subname, subcontracts)
return self.__getattribute__(item)
| ContractNamespace |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/methods/test_astype.py | {
"start": 262,
"end": 6331
} | class ____:
def test_astype_object(self):
idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
Timedelta("3 days"),
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex(
[timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
)
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
NaT,
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self, using_infer_string):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan], name="idx")
result = idx.astype(object)
expected = Index(
[Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(np.int64)
expected = Index(
[100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
if using_infer_string:
expected = Index(
[str(x) if x is not NaT else None for x in idx], name="idx", dtype="str"
)
else:
expected = Index([str(x) for x in idx], name="idx", dtype=object)
tm.assert_index_equal(result, expected)
rng = timedelta_range("1 days", periods=10)
result = rng.astype("i8")
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_uint(self):
arr = timedelta_range("1h", periods=2)
with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
arr.astype("uint64")
with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
arr.astype("uint32")
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
msg = (
r"Cannot convert from timedelta64\[ns\] to timedelta64. "
"Supported resolutions are 's', 'ms', 'us', 'ns'"
)
with pytest.raises(ValueError, match=msg):
idx.astype("timedelta64")
result = idx.astype("timedelta64[ns]")
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype("timedelta64[ns]", copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
def test_astype_to_td64d_raises(self, index_or_series):
# We don't support "D" reso
scalar = Timedelta(days=31)
td = index_or_series(
[scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
dtype="m8[ns]",
)
msg = (
r"Cannot convert from timedelta64\[ns\] to timedelta64\[D\]. "
"Supported resolutions are 's', 'ms', 'us', 'ns'"
)
with pytest.raises(ValueError, match=msg):
td.astype("timedelta64[D]")
def test_astype_ms_to_s(self, index_or_series):
scalar = Timedelta(days=31)
td = index_or_series(
[scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
dtype="m8[ns]",
)
exp_values = np.asarray(td).astype("m8[s]")
exp_tda = TimedeltaArray._simple_new(exp_values, dtype=exp_values.dtype)
expected = index_or_series(exp_tda)
assert expected.dtype == "m8[s]"
result = td.astype("timedelta64[s]")
tm.assert_equal(result, expected)
def test_astype_freq_conversion(self):
# pre-2.0 td64 astype converted to float64. now for supported units
# (s, ms, us, ns) this converts to the requested dtype.
# This matches TDA and Series
tdi = timedelta_range("1 Day", periods=30)
res = tdi.astype("m8[s]")
exp_values = np.asarray(tdi).astype("m8[s]")
exp_tda = TimedeltaArray._simple_new(
exp_values, dtype=exp_values.dtype, freq=tdi.freq
)
expected = Index(exp_tda)
assert expected.dtype == "m8[s]"
tm.assert_index_equal(res, expected)
# check this matches Series and TimedeltaArray
res = tdi._data.astype("m8[s]")
tm.assert_equal(res, expected._values)
res = tdi.to_series().astype("m8[s]")
tm.assert_equal(res._values, expected._values._with_freq(None))
@pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
msg = "Cannot cast TimedeltaIndex to dtype"
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_category(self):
obj = timedelta_range("1h", periods=2, freq="h")
result = obj.astype("category")
expected = pd.CategoricalIndex([Timedelta("1h"), Timedelta("2h")])
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = timedelta_range("1h", periods=2)
result = obj.astype(bool)
expected = Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| TestTimedeltaIndex |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-qianfan/llama_index/llms/qianfan/base.py | {
"start": 3942,
"end": 15360
} | class ____(CustomLLM):
"""
The LLM supported by Baidu Intelligent Cloud's QIANFAN LLM Platform.
"""
access_key: str = Field(
description="The Access Key obtained from the Security Authentication Center of Baidu Intelligent Cloud Console."
)
secret_key: str = Field(description="The Secret Key paired with the Access Key.")
model_name: str = Field(description="The name of the model service.")
endpoint_url: str = Field(description="The chat endpoint URL of the model service.")
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW, description="The context window size."
)
llm_type: APIType = Field(default="chat", description="The LLM type.")
_client = PrivateAttr()
def __init__(
self,
access_key: str,
secret_key: str,
model_name: str,
endpoint_url: str,
context_window: int,
llm_type: APIType = "chat",
) -> None:
"""
Initialize a Qianfan LLM instance.
:param access_key: The Access Key obtained from the Security Authentication Center
of Baidu Intelligent Cloud Console.
:param secret_key: The Secret Key paired with the Access Key.
:param model_name: The name of the model service. For example: ERNIE-4.0-8K.
:param endpoint_url: The chat endpoint URL of the model service.
For example: https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro .
:param context_windows: The context window size. for example: 8192.
:param llm_type: The LLM type. Currently, only the chat type is supported.
"""
if llm_type != "chat":
raise NotImplementedError("Only the chat type is supported.")
super().__init__(
model_name=model_name,
endpoint_url=endpoint_url,
context_window=context_window,
access_key=access_key,
secret_key=secret_key,
llm_type=llm_type,
)
self._client = Client(access_key, secret_key)
@classmethod
def from_model_name(
cls,
access_key: str,
secret_key: str,
model_name: str,
context_window: int,
):
"""
Initialize a Qianfan LLM instance. Then query more parameters based on the model name.
:param access_key: The Access Key obtained from the Security Authentication Center
of Baidu Intelligent Cloud Console.
:param secret_key: The Secret Key paired with the Access Key.
:param model_name: The name of the model service. For example: ERNIE-4.0-8K.
:param context_windows: The context window size. for example: 8192.
"""
service_list = get_service_list(access_key, secret_key, ["chat"])
try:
service = next(
service for service in service_list if service.name == model_name
)
except StopIteration:
raise NameError(f"not found {model_name}")
return cls(
access_key=access_key,
secret_key=secret_key,
model_name=model_name,
endpoint_url=service.url,
context_window=context_window,
llm_type=service.api_type,
)
@classmethod
async def afrom_model_name(
cls,
access_key: str,
secret_key: str,
model_name: str,
context_window: int,
):
"""
Initialize a Qianfan LLM instance. Then asynchronously query more parameters based on the model name.
:param access_key: The Access Key obtained from the Security Authentication Center of
Baidu Intelligent Cloud Console.
:param secret_key: The Secret Key paired with the Access Key.
:param model_name: The name of the model service. For example: ERNIE-4.0-8K.
:param context_windows: The context window size. for example: 8192.
The LLMs developed by Baidu all carry context window size in their names.
"""
service_list = await aget_service_list(access_key, secret_key, ["chat"])
try:
service = next(
service for service in service_list if service.name == model_name
)
except StopIteration:
raise NameError(f"not found {model_name}")
return cls(
access_key=access_key,
secret_key=secret_key,
model_name=model_name,
endpoint_url=service.url,
context_window=context_window,
llm_type=service.api_type,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Qianfan_LLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
is_chat_model=self.llm_type == "chat",
model_name=self.model_name,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
"""
Request a chat.
:param messages: The chat message list. The last message is the current request,
and the previous messages are the historical chat information. The number of
members must be odd, and the role value of the odd-numbered messages must be
"user", while the role value of the even-numbered messages must be "assistant".
:return: The ChatResponse object.
"""
request = build_chat_request(stream=False, messages=messages, **kwargs)
resp_dict = self._client.post(self.endpoint_url, json=request.dict())
return parse_chat_response(resp_dict)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
"""
Asynchronous request for a chat.
:param messages: The chat message list. The last message is the current request,
and the previous messages are the historical chat information. The number of
members must be odd, and the role value of the odd-numbered messages must be
"user", while the role value of the even-numbered messages must be "assistant".
:return: The ChatResponse object.
"""
request = build_chat_request(stream=False, messages=messages, **kwargs)
resp_dict = await self._client.apost(self.endpoint_url, json=request.dict())
return parse_chat_response(resp_dict)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
"""
Request a chat, and the response is returned in a stream.
:param messages: The chat message list. The last message is the current request,
and the previous messages are the historical chat information. The number of
members must be odd, and the role value of the odd-numbered messages must be
"user", while the role value of the even-numbered messages must be "assistant".
:return: A ChatResponseGen object, which is a generator of ChatResponse.
"""
request = build_chat_request(stream=True, messages=messages, **kwargs)
def gen():
resp_dict_iter = self._client.post_reply_stream(
self.endpoint_url, json=request.dict()
)
yield from parse_stream_chat_response(resp_dict_iter)
return gen()
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
"""
Asynchronous request a chat, and the response is returned in a stream.
:param messages: The chat message list. The last message is the current request,
and the previous messages are the historical chat information. The number of
members must be odd, and the role value of the odd-numbered messages must be
"user", while the role value of the even-numbered messages must be "assistant".
:return: A ChatResponseAsyncGen object, which is a asynchronous generator of ChatResponse.
"""
request = build_chat_request(stream=True, messages=messages, **kwargs)
async def gen():
resp_dict_iter = self._client.apost_reply_stream(
self.endpoint_url, json=request.dict()
)
async for part in aparse_stream_chat_response(resp_dict_iter):
yield part
return gen()
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""
Request to complete a message that begins with the specified prompt.
The LLM developed by Baidu does not support the complete function.
Here use a converter to convert the chat function to a complete function.
:param prompt: The prompt message at the beginning of the completed content.
:return: CompletionResponse.
"""
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""
Asynchronous request to complete a message that begins with the specified prompt.
The LLM developed by Baidu does not support the complete function.
Here use a converter to convert the chat function to a complete function.
:param prompt: The prompt message at the beginning of the completed content.
:return: A CompletionResponse object.
"""
complete_fn = achat_to_completion_decorator(self.achat)
return await complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
"""
Request to complete a message that begins with the specified prompt,
and the response is returned in a stream.
The LLM developed by Baidu does not support the complete function.
Here use a converter to convert the chat function to a complete function.
:param prompt: The prompt message at the beginning of the completed content.
:return: A CompletionResponseGen object.
"""
complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
"""
Asynchronous request to complete a message that begins with the specified prompt,
and the response is returned in a stream.
The LLM developed by Baidu does not support the complete function.
Here use a converter to convert the chat function to a complete function.
:param prompt: The prompt message at the beginning of the completed content.
:return: A CompletionResponseAsyncGen object.
"""
complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
return await complete_fn(prompt, **kwargs)
| Qianfan |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 19929,
"end": 95350
} | class ____(WindowsRPath, PackageViewMixin, metaclass=PackageMeta):
"""This is the universal base class for all Spack packages.
At its core, a package consists of a set of software to be installed. A package may focus on a
piece of software and its associated software dependencies or it may simply be a set, or
bundle, of software. The former requires defining how to fetch, verify (via, e.g., ``sha256``),
build, and install that software and the packages it depends on, so that dependencies can be
installed along with the package itself. The latter, sometimes referred to as a "no-source"
package, requires only defining the packages to be built.
There are two main parts of a Spack package:
1. **The package class**. Classes contain *directives*, which are functions such as
:py:func:`spack.package.version`, :py:func:`spack.package.patch`, and
:py:func:`spack.package.depends_on`, that store metadata on the package class. Directives
provide the constraints that are used as input to the concretizer.
2. **Package instances**. Once instantiated with a concrete spec, a package can be passed to
the :py:class:`spack.installer.PackageInstaller`. It calls methods like :meth:`do_stage` on
the package instance, and it uses those to drive user-implemented methods like ``def patch``
and install phases like ``def configure`` and ``def install``.
Packages are imported from package repositories (see :py:mod:`spack.repo`).
For most use cases, package creators typically just add attributes like ``homepage`` and, for
a code-based package, ``url``, or installation phases such as ``install()``.
There are many custom ``PackageBase`` subclasses in the ``spack_repo.builtin.build_systems``
package that make things even easier for specific build systems.
.. note::
Many methods and attributes that appear to be public interface are not meant to be
overridden by packagers. They are "final", but we currently have not adopted the ``@final``
decorator in the Spack codebase. For example, the ``do_*`` functions are intended only to be
called internally by Spack commands. These aren't for package writers to override, and
doing so may break the functionality of the ``PackageBase`` class."""
compiler = DeprecatedCompiler()
#: Class level dictionary populated by :func:`~spack.directives.version` directives
versions: dict
#: Class level dictionary populated by :func:`~spack.directives.resource` directives
resources: Dict[spack.spec.Spec, List[Resource]]
#: Class level dictionary populated by :func:`~spack.directives.depends_on` and
#: :func:`~spack.directives.extends` directives
dependencies: Dict[spack.spec.Spec, Dict[str, spack.dependency.Dependency]]
#: Class level dictionary populated by :func:`~spack.directives.extends` directives
extendees: Dict[str, Tuple[spack.spec.Spec, spack.spec.Spec]]
#: Class level dictionary populated by :func:`~spack.directives.conflicts` directives
conflicts: Dict[spack.spec.Spec, List[Tuple[spack.spec.Spec, Optional[str]]]]
#: Class level dictionary populated by :func:`~spack.directives.requires` directives
requirements: Dict[
spack.spec.Spec, List[Tuple[Tuple[spack.spec.Spec, ...], str, Optional[str]]]
]
#: Class level dictionary populated by :func:`~spack.directives.provides` directives
provided: Dict[spack.spec.Spec, Set[spack.spec.Spec]]
#: Class level dictionary populated by :func:`~spack.directives.provides` directives
provided_together: Dict[spack.spec.Spec, List[Set[str]]]
#: Class level dictionary populated by :func:`~spack.directives.patch` directives
patches: Dict[spack.spec.Spec, List[spack.patch.Patch]]
#: Class level dictionary populated by :func:`~spack.directives.variant` directives
variants: Dict[spack.spec.Spec, Dict[str, spack.variant.Variant]]
#: Class level dictionary populated by :func:`~spack.directives.license` directives
licenses: Dict[spack.spec.Spec, str]
#: Class level dictionary populated by :func:`~spack.directives.can_splice` directives
splice_specs: Dict[spack.spec.Spec, Tuple[spack.spec.Spec, Union[None, str, List[str]]]]
#: Class level dictionary populated by :func:`~spack.directives.redistribute` directives
disable_redistribute: Dict[spack.spec.Spec, DisableRedistribute]
#: Must be defined as a fallback for old specs that don't have the ``build_system`` variant
default_buildsystem: str
#: Use :attr:`~spack.package_base.PackageBase.default_buildsystem` instead of this attribute,
#: which is deprecated
legacy_buildsystem: str
#: Used when reporting the build system to users
build_system_class: str = "PackageBase"
#: By default, packages are not virtual
#: Virtual packages override this attribute
virtual: bool = False
#: Most Spack packages are used to install source or binary code while
#: those that do not can be used to install a set of other Spack packages.
has_code: bool = True
#: By default we build in parallel. Subclasses can override this.
parallel: bool = True
#: By default do not run tests within package's install()
run_tests: bool = False
#: Most packages are NOT extendable. Set to True if you want extensions.
extendable: bool = False
#: When True, add RPATHs for the entire DAG. When False, add RPATHs only
#: for immediate dependencies.
transitive_rpaths: bool = True
#: List of shared objects that should be replaced with a different library at
#: runtime. Typically includes stub libraries like ``libcuda.so``. When linking
#: against a library listed here, the dependent will only record its soname
#: or filename, not its absolute path, so that the dynamic linker will search
#: for it. Note: accepts both file names and directory names, for example
#: ``["libcuda.so", "stubs"]`` will ensure ``libcuda.so`` and all libraries in the
#: ``stubs`` directory are not bound by path.
non_bindable_shared_objects: List[str] = []
#: List of fnmatch patterns of library file names (specifically DT_NEEDED entries) that are not
#: expected to be locatable in RPATHs. Generally this is a problem, and Spack install with
#: config:shared_linking:strict will cause install failures if such libraries are found.
#: However, in certain cases it can be hard if not impossible to avoid accidental linking
#: against system libraries; until that is resolved, this attribute can be used to suppress
#: errors.
unresolved_libraries: List[str] = []
#: List of prefix-relative file paths (or a single path). If these do
#: not exist after install, or if they exist but are not files,
#: sanity checks fail.
sanity_check_is_file: List[str] = []
#: List of prefix-relative directory paths (or a single path). If
#: these do not exist after install, or if they exist but are not
#: directories, sanity checks will fail.
sanity_check_is_dir: List[str] = []
#: Boolean. Set to ``True`` for packages that require a manual download.
#: This is currently used by package sanity tests and generation of a
#: more meaningful fetch failure error.
manual_download: bool = False
#: Set of additional options used when fetching package versions.
fetch_options: Dict[str, Any] = {}
#
# Set default licensing information
#
#: If set to ``True``, this software requires a license.
#: If set to ``False``, all of the ``license_*`` attributes will
#: be ignored. Defaults to ``False``.
license_required: bool = False
#: Contains the symbol used by the license manager to denote
#: a comment. Defaults to ``#``.
license_comment: str = "#"
#: These are files that the software searches for when
#: looking for a license. All file paths must be relative to the
#: installation directory. More complex packages like Intel may require
#: multiple licenses for individual components. Defaults to the empty list.
license_files: List[str] = []
#: Environment variables that can be set to tell the
#: software where to look for a license if it is not in the usual location.
#: Defaults to the empty list.
license_vars: List[str] = []
#: A URL pointing to license setup instructions for the software.
#: Defaults to the empty string.
license_url: str = ""
#: Verbosity level, preserved across installs.
_verbose = None
#: Package homepage where users can find more information about the package
homepage: ClassProperty[Optional[str]] = None
#: Default list URL (place to find available versions)
list_url: ClassProperty[Optional[str]] = None
#: Link depth to which list_url should be searched for new versions
list_depth: int = 0
#: List of GitHub usernames of package maintainers.
#: Do not include @ here in order not to unnecessarily ping the users.
maintainers: List[str] = []
#: Set to ``True`` to indicate the stand-alone test requires a compiler.
#: It is used to ensure a compiler and build dependencies like ``cmake``
#: are available to build a custom test code.
test_requires_compiler: bool = False
#: TestSuite instance used to manage stand-alone tests for 1+ specs.
test_suite: Optional[Any] = None
def __init__(self, spec: spack.spec.Spec) -> None:
# this determines how the package should be built.
self.spec = spec
# Allow custom staging paths for packages
self.path = None
# Keep track of whether or not this package was installed from
# a binary cache.
self.installed_from_binary_cache = False
# Ensure that only one of these two attributes are present
if getattr(self, "url", None) and getattr(self, "urls", None):
msg = "a package can have either a 'url' or a 'urls' attribute"
msg += " [package '{0.name}' defines both]"
raise ValueError(msg.format(self))
# init internal variables
self._stage: Optional[stg.StageComposite] = None
# need to track patch stages separately, in order to apply them
self._patch_stages: List[stg.Stage] = []
self._fetcher = None
self._tester: Optional[Any] = None
# Set up timing variables
self._fetch_time = 0.0
super().__init__()
def __getitem__(self, key: str) -> "PackageBase":
return self.spec[key].package
@classmethod
def dependency_names(cls):
return _subkeys(cls.dependencies)
@classmethod
def dependencies_by_name(cls, when: bool = False):
return _by_subkey(cls.dependencies, when=when)
# Accessors for variants
# External code working with Variants should go through the methods below
@classmethod
def variant_names(cls) -> List[str]:
return _subkeys(cls.variants)
@classmethod
def has_variant(cls, name) -> bool:
return _has_subkey(cls.variants, name)
@classmethod
def num_variant_definitions(cls) -> int:
"""Total number of variant definitions in this class so far."""
return _num_definitions(cls.variants)
@classmethod
def variant_definitions(cls, name: str) -> List[Tuple[spack.spec.Spec, spack.variant.Variant]]:
"""Iterator over (when_spec, Variant) for all variant definitions for a particular name."""
return _definitions(cls.variants, name)
@classmethod
def variant_items(cls) -> Iterable[Tuple[spack.spec.Spec, Dict[str, spack.variant.Variant]]]:
"""Iterate over ``cls.variants.items()`` with overridden definitions removed."""
# Note: This is quadratic in the average number of variant definitions per name.
# That is likely close to linear in practice, as there are few variants with
# multiple definitions (but it matters when they are there).
exclude = {
name: [id(vdef) for _, vdef in cls.variant_definitions(name)]
for name in cls.variant_names()
}
for when, variants_by_name in cls.variants.items():
filtered_variants_by_name = {
name: vdef for name, vdef in variants_by_name.items() if id(vdef) in exclude[name]
}
if filtered_variants_by_name:
yield when, filtered_variants_by_name
def get_variant(self, name: str) -> spack.variant.Variant:
"""Get the highest precedence variant definition matching this package's spec.
Arguments:
name: name of the variant definition to get
"""
try:
highest_to_lowest = reversed(self.variant_definitions(name))
return next(vdef for when, vdef in highest_to_lowest if self.spec.satisfies(when))
except StopIteration:
raise ValueError(f"No variant '{name}' on spec: {self.spec}")
@classmethod
def validate_variant_names(self, spec: spack.spec.Spec):
"""Check that all variant names on Spec exist in this package.
Raises ``UnknownVariantError`` if invalid variants are on the spec.
"""
names = self.variant_names()
for v in spec.variants:
if v not in names:
raise spack.variant.UnknownVariantError(
f"No such variant '{v}' in package {self.name}", [v]
)
@classproperty
def package_dir(cls):
"""Directory where the package.py file lives."""
return os.path.abspath(os.path.dirname(cls.module.__file__))
@classproperty
def module(cls):
"""Module instance that this package class is defined in.
We use this to add variables to package modules. This makes
install() methods easier to write (e.g., can call configure())
"""
return sys.modules[cls.__module__]
@classproperty
def namespace(cls):
"""Spack namespace for the package, which identifies its repo."""
return spack.repo.namespace_from_fullname(cls.__module__)
@classproperty
def fullname(cls):
"""Name of this package, including the namespace"""
return "%s.%s" % (cls.namespace, cls.name)
@classproperty
def fullnames(cls):
"""Fullnames for this package and any packages from which it inherits."""
fullnames = []
for base in cls.__mro__:
if not spack.repo.is_package_module(base.__module__):
break
fullnames.append(base.fullname)
return fullnames
@classproperty
def name(cls):
"""The name of this package."""
if cls._name is None:
# We cannot know the exact package API version, but we can distinguish between v1
# v2 based on the module. We don't want to figure out the exact package API version
# since it requires parsing the repo.yaml.
module = cls.__module__
if module.startswith(spack.repo.PKG_MODULE_PREFIX_V1):
version = (1, 0)
elif module.startswith(spack.repo.PKG_MODULE_PREFIX_V2):
version = (2, 0)
else:
raise ValueError(f"Package {cls.__qualname__} is not a known Spack package")
if version < (2, 0):
# spack.pkg.builtin.package_name.
_, _, pkg_module = module.rpartition(".")
else:
# spack_repo.builtin.packages.package_name.package
pkg_module = module.rsplit(".", 2)[-2]
cls._name = spack.util.naming.pkg_dir_to_pkg_name(pkg_module, version)
return cls._name
@classproperty
def global_license_dir(cls):
"""Returns the directory where license files for all packages are stored."""
return spack.util.path.canonicalize_path(spack.config.get("config:license_dir"))
@property
def global_license_file(self):
"""Returns the path where a global license file for this
particular package should be stored."""
if not self.license_files:
return
return os.path.join(
self.global_license_dir, self.name, os.path.basename(self.license_files[0])
)
# Source redistribution must be determined before concretization (because source mirrors work
# with abstract specs).
@classmethod
def redistribute_source(cls, spec):
"""Whether it should be possible to add the source of this
package to a Spack mirror."""
for when_spec, disable_redistribute in cls.disable_redistribute.items():
if disable_redistribute.source and spec.satisfies(when_spec):
return False
return True
@property
def redistribute_binary(self):
"""Whether it should be possible to create a binary out of an installed instance of this
package."""
for when_spec, disable_redistribute in self.disable_redistribute.items():
if disable_redistribute.binary and self.spec.satisfies(when_spec):
return False
return True
@property
def keep_werror(self) -> Optional[Literal["all", "specific", "none"]]:
"""Keep ``-Werror`` flags, matches ``config:flags:keep_werror`` to override config.
Valid return values are:
* ``"all"``: keep all ``-Werror`` flags.
* ``"specific"``: keep only ``-Werror=specific-warning`` flags.
* ``"none"``: filter out all ``-Werror*`` flags.
* :data:`None`: respect the user's configuration (``"none"`` by default).
"""
if self.spec.satisfies("%nvhpc@:23.3"):
# Filtering works by replacing -Werror with -Wno-error, but older nvhpc and
# PGI do not understand -Wno-error, so we disable filtering.
return "all"
elif self.spec.satisfies("%nvhpc@23.4:"):
# newer nvhpc supports -Wno-error but can't disable specific warnings with
# -Wno-error=warning. Skip -Werror=warning, but still filter -Werror.
return "specific"
else:
# use -Werror disablement by default for other compilers
return None
@property
def version(self):
if not self.spec.versions.concrete:
raise ValueError(
"Version requested for a package that" " does not have a concrete version."
)
return self.spec.versions[0]
@classmethod
@memoized
def version_urls(cls) -> Dict[StandardVersion, str]:
"""Dict of explicitly defined URLs for versions of this package.
Return:
An dict mapping version to url, ordered by version.
A version's URL only appears in the result if it has an an explicitly defined ``url``
argument. So, this list may be empty if a package only defines ``url`` at the top level.
"""
return {v: args["url"] for v, args in sorted(cls.versions.items()) if "url" in args}
def nearest_url(self, version):
"""Finds the URL with the "closest" version to ``version``.
This uses the following precedence order:
1. Find the next lowest or equal version with a URL.
2. If no lower URL, return the next *higher* URL.
3. If no higher URL, return None.
"""
version_urls = self.version_urls()
if version in version_urls:
return version_urls[version]
last_url = None
for v, u in self.version_urls().items():
if v > version:
if last_url:
return last_url
last_url = u
return last_url
def url_for_version(self, version: Union[str, StandardVersion]) -> str:
"""Returns a URL from which the specified version of this package may be downloaded.
Arguments:
version: The version for which a URL is sought."""
return self._implement_all_urls_for_version(version)[0]
def _update_external_dependencies(
self, extendee_spec: Optional[spack.spec.Spec] = None
) -> None:
"""
Method to override in package classes to handle external dependencies
"""
pass
def detect_dev_src_change(self) -> bool:
"""
Method for checking for source code changes to trigger rebuild/reinstall
"""
dev_path_var = self.spec.variants.get("dev_path", None)
_, record = spack.store.STORE.db.query_by_spec_hash(self.spec.dag_hash())
assert dev_path_var and record, "dev_path variant and record must be present"
return fsys.recursive_mtime_greater_than(dev_path_var.value, record.installation_time)
@classmethod
def version_or_package_attr(cls, attr, version, default=NO_DEFAULT):
"""
Get an attribute that could be on the version or package with preference to the version
"""
version_attrs = cls.versions.get(version)
if version_attrs and attr in version_attrs:
return version_attrs.get(attr)
if default is NO_DEFAULT and not hasattr(cls, attr):
raise PackageError(f"{attr} attribute not defined on {cls.name}")
return getattr(cls, attr, default)
@classmethod
def needs_commit(cls, version) -> bool:
    """Whether this version needs a git commit sha to be resolved."""
    # Git-based versions always require a commit lookup.
    if isinstance(version, GitVersion):
        return True
    attrs = cls.versions.get(version)
    if not attrs:
        return False
    # Versions declared with a commit/tag/branch are git-backed too.
    return any(attrs.get(key) for key in ("commit", "tag", "branch"))
@classmethod
def _resolve_git_provenance(cls, spec) -> None:
    """Attach a ``commit`` variant to ``spec`` so its git source is pinned.

    Resolution order: keep a user-supplied commit, use a commit recorded on
    the version directive, or resolve the ref (tag/branch) first from a
    local/mirrored archive and then from the remote repository.

    Raises:
        VersionError: if the version has no commit, tag, or branch to resolve.
    """
    # early return cases, don't overwrite user intention
    # commit pre-assigned or develop specs don't need commits changed
    # since this would create un-necessary churn
    if "commit" in spec.variants or spec.is_develop:
        return

    if is_git_version(str(spec.version)):
        ref = spec.version.ref
    else:
        v_attrs = cls.versions.get(spec.version, {})
        if "commit" in v_attrs:
            # version directive already pins an exact commit; record it and stop
            spec.variants["commit"] = spack.variant.SingleValuedVariant(
                "commit", v_attrs["commit"]
            )
            return
        ref = v_attrs.get("tag") or v_attrs.get("branch")

    if not ref:
        raise VersionError(
            f"{spec.name}'s version {str(spec.version)} "
            "is missing a git ref (commit, tag or branch)"
        )

    # Look for commits in the following places:
    # 1) mirror archive file, (cheapish, local, staticish)
    # 2) URL (cheap, remote, dynamic)
    #
    # If users pre-stage (_LOCAL_CACHE), or use a mirror they can expect
    # consistent commit resolution
    sha = None

    # construct a package instance to get fetch/staging together
    pkg_instance = cls(spec.copy())
    try:
        pkg_instance.do_fetch(mirror_only=True)
    except spack.error.FetchError:
        # no mirror copy available; fall through to the remote lookup below
        pass

    if pkg_instance.stage.archive_file:
        sha = spack.util.archive.retrieve_commit_from_archive(
            pkg_instance.stage.archive_file, ref
        )
    if not sha:
        url = cls.version_or_package_attr("git", spec.version)
        sha = spack.util.git.get_commit_sha(url, ref)
    if sha:
        spec.variants["commit"] = spack.variant.SingleValuedVariant("commit", sha)
def resolve_binary_provenance(self):
    """
    Method to ensure concrete spec has binary provenance.

    Base implementation will look up git commits when appropriate.
    Packages may override this implementation for custom implementations.
    """
    # Default: pin git-backed versions to a concrete commit.
    self._resolve_git_provenance(self.spec)
def all_urls_for_version(self, version: StandardVersion) -> List[str]:
    """Return all URLs derived from version_urls(), url, urls, and
    list_url (if it contains a version) in a package in that order.

    Args:
        version: the version for which a URL is sought
    """
    # Honor a package-provided url_for_version override, if one exists.
    custom = None
    if type(self).url_for_version != PackageBase.url_for_version:
        custom = self.url_for_version
    return self._implement_all_urls_for_version(version, custom)
def _implement_all_urls_for_version(
    self,
    version: Union[str, StandardVersion],
    custom_url_for_version: Optional[Callable[[StandardVersion], Optional[str]]] = None,
) -> List[str]:
    """Build the ordered list of candidate download URLs for ``version``.

    Precedence: an explicit per-version URL, then ``custom_url_for_version``
    (if given), then version substitutions into ``url``, ``urls`` and
    ``list_url``. If none of those yield a URL, a raw default (or the
    nearest version's URL) is substituted as a last resort.

    Raises:
        NoURLError: if no URL can be derived at all.
    """
    version = StandardVersion.from_string(version) if isinstance(version, str) else version
    urls: List[str] = []

    # If we have a specific URL for this version, don't extrapolate.
    url = self.version_urls().get(version)
    if url:
        urls.append(url)

    # if there is a custom url_for_version, use it
    if custom_url_for_version is not None:
        u = custom_url_for_version(version)
        if u is not None and u not in urls:
            urls.append(u)

    def sub_and_add(u: Optional[str]) -> None:
        # Substitute this version into ``u`` and collect the result.
        if u is None:
            return
        # skip the url if there is no version to replace
        try:
            spack.url.parse_version(u)
        except spack.url.UndetectableVersionError:
            return
        urls.append(spack.url.substitute_version(u, self.url_version(version)))

    # If no specific URL, use the default, class-level URL
    sub_and_add(getattr(self, "url", None))
    for u in getattr(self, "urls", []):
        sub_and_add(u)

    sub_and_add(getattr(self, "list_url", None))

    # if no version-bearing URLs can be found, try them raw
    if not urls:
        default_url = getattr(self, "url", getattr(self, "urls", [None])[0])

        # if no exact match AND no class-level default, use the nearest URL
        if not default_url:
            default_url = self.nearest_url(version)

            # if there are NO URLs to go by, then we can't do anything
            if not default_url:
                raise NoURLError(self.__class__)
        urls.append(spack.url.substitute_version(default_url, self.url_version(version)))

    return urls
def find_valid_url_for_version(self, version: StandardVersion) -> Optional[str]:
    """First candidate URL for ``version`` that actually exists, or None.

    Candidates are tried in order: ``url``, ``urls``, then :attr:`list_url`.

    Arguments:
        version: The version for which a URL is sought.
    """
    for candidate in self.all_urls_for_version(version):
        if spack.util.web.url_exists(candidate):
            return candidate
    return None
def _make_resource_stage(self, root_stage, resource):
    """Create the ResourceStage that fetches/expands ``resource`` under ``root_stage``."""
    pretty_resource_name = fsys.polite_filename(f"{resource.name}-{self.version}")
    return stg.ResourceStage(
        resource.fetcher,
        root=root_stage,
        resource=resource,
        name=self._resource_stage(resource),
        # Mirror path keyed by package name so mirrored resources are shared.
        mirror_paths=spack.mirrors.layout.default_mirror_layout(
            resource.fetcher, os.path.join(self.name, pretty_resource_name)
        ),
        mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
        path=self.path,
    )
def _download_search(self):
    """Fallback search: try a fetcher scraped from the package's list_url."""
    fetcher = fs.from_list_url(self)
    if fetcher:
        return [fetcher]
    return []
def _make_root_stage(self, fetcher):
    """Create the primary source Stage for this package using ``fetcher``."""
    # Construct a mirror path (TODO: get this out of package.py)
    format_string = "{name}-{version}"
    pretty_name = self.spec.format_path(format_string)
    mirror_paths = spack.mirrors.layout.default_mirror_layout(
        fetcher, os.path.join(self.name, pretty_name), self.spec
    )
    # Construct a path where the stage should build..
    s = self.spec
    stage_name = stg.compute_stage_name(s)
    stage = stg.Stage(
        fetcher,
        mirror_paths=mirror_paths,
        mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
        name=stage_name,
        path=self.path,
        # Fall back to scraping list_url when the primary fetcher fails.
        search_fn=self._download_search,
    )
    return stage
def _make_stages(self) -> Tuple[stg.StageComposite, List[stg.Stage]]:
    """Create stages for this package, its resources, and any patches to be applied.

    Returns:
        A StageComposite containing all stages created, as well as a list of patch stages for
        any patches that need to be fetched remotely.

        The StageComposite is used to manage (create destroy, etc.) the stages.

        The list of patch stages will be in the same order that patches are to be applied
        to the package's staged source code. This is needed in order to apply the patches later.
    """
    # If it's a dev package (not transitively), use a DIY stage object
    dev_path_var = self.spec.variants.get("dev_path", None)
    if dev_path_var:
        dev_path = dev_path_var.value
        link_format = spack.config.get("config:develop_stage_link")
        if not link_format:
            link_format = "build-{arch}-{hash:7}"
        stage_link = self.spec.format_path(link_format)
        source_stage = stg.DevelopStage(
            stg.compute_stage_name(self.spec), dev_path, stage_link
        )
    else:
        source_stage = self._make_root_stage(self.fetcher)

    # all_stages is source + resources + patches
    all_stages = stg.StageComposite()
    all_stages.append(source_stage)
    all_stages.extend(
        self._make_resource_stage(source_stage, r) for r in self._get_needed_resources()
    )

    def make_patch_stage(patch: spack.patch.UrlPatch, uniqe_part: str):
        # UrlPatches can make their own fetchers
        fetcher = patch.fetcher()

        # The same package can have multiple patches with the same name but
        # with different contents, therefore apply a subset of the hash.
        fetch_digest = patch.archive_sha256 or patch.sha256
        name = f"{os.path.basename(patch.url)}-{fetch_digest[:7]}"
        per_package_ref = os.path.join(patch.owner.split(".")[-1], name)
        mirror_ref = spack.mirrors.layout.default_mirror_layout(fetcher, per_package_ref)
        return stg.Stage(
            fetcher,
            name=f"{stg.stage_prefix}-{uniqe_part}-patch-{fetch_digest}",
            mirror_paths=mirror_ref,
            mirrors=spack.mirrors.mirror.MirrorCollection(source=True).values(),
        )

    if self.spec.concrete:
        patches = self.spec.patches
        uniqe_part = self.spec.dag_hash(7)
    else:
        # The only code path that gets here is `spack mirror create --all`,
        # which needs all matching patches.
        patch_lists = [
            plist for when, plist in self.patches.items() if self.spec.intersects(when)
        ]
        patches = sum(patch_lists, [])
        uniqe_part = self.name

    # Only remote (URL) patches need their own fetch stages; keep them in
    # application order so do_patch() can pair them back up with the patches.
    patch_stages = [
        make_patch_stage(p, uniqe_part) for p in patches if isinstance(p, spack.patch.UrlPatch)
    ]
    all_stages.extend(patch_stages)
    return all_stages, patch_stages
@property
def stage(self):
    """Get the build staging area for this package.

    This automatically instantiates a ``Stage`` object if the package
    doesn't have one yet, but it does not create the Stage directory
    on the filesystem.

    Raises:
        ValueError: if the spec's version is not concrete.
    """
    if not self.spec.versions.concrete:
        raise ValueError("Cannot retrieve stage for package without concrete version.")
    if self._stage is None:
        # Also remember the patch stages so do_patch() can locate fetched patches.
        self._stage, self._patch_stages = self._make_stages()
    return self._stage
@stage.setter
def stage(self, stage: stg.StageComposite):
    """Allow a stage object to be set to override the default."""
    # NOTE: does not reset self._patch_stages; callers overriding the stage
    # are expected not to rely on do_patch()'s remote patch stages.
    self._stage = stage
@property
def env_path(self):
    """Path of the build environment file within the stage directory."""
    stage_dir = self.stage.path
    return os.path.join(stage_dir, _spack_build_envfile)
@property
def env_mods_path(self):
    """Path of the build environment *modifications* file within the stage directory."""
    stage_dir = self.stage.path
    return os.path.join(stage_dir, _spack_build_envmodsfile)
@property
def metadata_dir(self):
    """Return the install metadata directory (per the store's directory layout)."""
    return spack.store.STORE.layout.metadata_path(self.spec)
@property
def install_env_path(self):
    """Path of the build environment file recorded on successful installation.

    Prefers the legacy ``build.env`` name when that file already exists
    (backward compatibility); otherwise uses the current file name.
    """
    legacy = os.path.join(self.metadata_dir, "build.env")
    if os.path.exists(legacy):
        return legacy
    return os.path.join(self.metadata_dir, _spack_build_envfile)
@property
def log_path(self):
    """Path of the main build log within the stage directory."""
    stage_dir = self.stage.path
    return os.path.join(stage_dir, _spack_build_logfile)
@property
def phase_log_files(self):
    """Sorted list of per-phase build logs written to the staging directory."""
    pattern = os.path.join(self.stage.path, "spack-build-*-out.txt")
    return sorted(glob.glob(pattern))
@property
def install_log_path(self):
    """Path of the (compressed) build log recorded on successful installation.

    Older installs used uncompressed names; the first of those that exists is
    returned before falling back to the current gzipped name.
    """
    for legacy_name in (_spack_build_logfile, "build.out", "build.txt"):
        candidate = os.path.join(self.metadata_dir, legacy_name)
        if os.path.exists(candidate):
            return candidate
    # Otherwise, return the current install log path name.
    return os.path.join(self.metadata_dir, _spack_build_logfile + ".gz")
@property
def configure_args_path(self):
    """Path of the configure-args file within the stage directory."""
    stage_dir = self.stage.path
    return os.path.join(stage_dir, _spack_configure_argsfile)
@property
def times_log_path(self):
    """Path of the JSON file recording per-phase build times."""
    meta = self.metadata_dir
    return os.path.join(meta, spack_times_log)
@property
def install_configure_args_path(self):
    """Path of the configure-args file recorded on successful installation."""
    meta = self.metadata_dir
    return os.path.join(meta, _spack_configure_argsfile)
def archive_install_test_log(self):
    """Archive the install-phase test log, if a tester is present."""
    tester = getattr(self, "tester", None)
    if tester:
        tester.archive_install_test_log(self.metadata_dir)
@property
def tester(self):
    """Lazily-created ``PackageTest`` helper for stand-alone tests.

    Raises:
        ValueError: if the spec's version is not concrete.
    """
    # Imported here to avoid a circular import at module load time.
    import spack.install_test

    if not self.spec.versions.concrete:
        raise ValueError("Cannot retrieve tester for package without concrete version.")

    if not self._tester:
        self._tester = spack.install_test.PackageTest(self)
    return self._tester
@property
def fetcher(self):
    """Lazily-created fetch strategy for this package's concrete version.

    Raises:
        ValueError: if the spec's version is not concrete.
    """
    if not self.spec.versions.concrete:
        raise ValueError("Cannot retrieve fetcher for package without concrete version.")
    if not self._fetcher:
        # assign private member with the public setter api for error checking
        self.fetcher = fs.for_package_version(self)
    return self._fetcher
@fetcher.setter
def fetcher(self, f):
    # Keep the back-reference in sync: the fetcher needs its package for
    # error reporting and URL computation.
    self._fetcher = f
    self._fetcher.set_package(self)
@classmethod
def dependencies_of_type(cls, deptypes: dt.DepFlag):
    """Get names of dependencies that can possibly have these deptypes.

    This analyzes the package and determines which dependencies *can*
    be a certain kind of dependency. Note that they may not *always*
    be this kind of dependency, since dependencies can be optional,
    so something may be a build dependency in one configuration and a
    run dependency in another.
    """
    result = set()
    for name, dependencies in cls.dependencies_by_name().items():
        if any(dep.depflag & deptypes for dep in dependencies):
            result.add(name)
    return result
# TODO: allow more than one active extendee.
@property
def extendee_spec(self) -> Optional[spack.spec.Spec]:
    """Spec of the extendee of this package, or None if it is not an extension."""
    if not self.extendees:
        return None

    # If the extendee is in the spec's deps already, return that.
    deps = [
        dep
        for dep in self.spec.dependencies(deptype=("link", "run"))
        for d, when in self.extendees.values()
        if dep.satisfies(d) and self.spec.satisfies(when)
    ]

    if deps:
        # only one extendee can be active at a time (see TODO above)
        assert len(deps) == 1
        return deps[0]

    # if the spec is concrete already, then it extends something
    # that is an *optional* dependency, and the dep isn't there.
    if self.spec._concrete:
        return None
    else:
        # If it's not concrete, then return the spec from the
        # extends() directive since that is all we know so far.
        spec_str = next(iter(self.extendees))
        return spack.spec.Spec(spec_str)
@property
def is_extension(self):
    """Whether this package is (or could be) an extension of another package."""
    # Concrete specs are extensions only if they actually depend on the
    # extendee; abstract specs count as long as they *could* extend.
    if not self.spec._concrete:
        return bool(self.extendees)
    return self.extendee_spec is not None
def extends(self, spec: spack.spec.Spec) -> bool:
"""
Returns True if this package extends the given spec.
If ``self.spec`` is concrete, this returns whether this package extends
the given spec.
If ``self.spec`` is not concrete, this returns whether this package may
extend the given spec.
"""
if spec.name not in self.extendees:
return False
s = self.extendee_spec
return s is not None and spec.satisfies(s)
def provides(self, vpkg_name: str) -> bool:
    """
    True if this package provides a virtual package with the specified name.
    """
    for when_spec, provided in self.provided.items():
        if not self.spec.intersects(when_spec):
            continue
        if any(vspec.name == vpkg_name for vspec in provided):
            return True
    return False
def intersects(self, spec: spack.spec.Spec) -> bool:
    """Context-ful intersection that takes into account package information.

    By design, ``Spec.intersects()`` does not know anything about package metadata.
    This avoids unnecessary package lookups and keeps things efficient where extra
    information is not needed, and it decouples ``Spec`` from ``PackageBase``.

    In many cases, though, we can rule more cases out in ``intersects()`` if we
    know, for example, that certain variants are always single-valued, or that
    certain variants are conditional on other variants. This adds logic for such
    cases when they are knowable.

    Note that because ``intersects()`` is conservative, it can only give false
    positives ("i.e., the two specs *may* overlap"), not false negatives. This
    method can fix false positives (i.e. it may return ``False`` when
    ``Spec.intersects()`` would return ``True``, but it will never return ``True``
    when ``Spec.intersects()`` returns ``False``.
    """
    # Spec.intersects() is right when False
    if not self.spec.intersects(spec):
        return False

    def sv_variant_conflicts(spec, variant):
        # True when ``spec`` pins a *single-valued* variant to a different value.
        name = variant.name
        return (
            variant.name in spec.variants
            and all(not d[name].multi for when, d in self.variants.items() if name in d)
            and spec.variants[name].value != variant.value
        )

    # Specs don't know if a variant is single- or multi-valued (concretization handles this)
    # But, we know if the spec has a value for a single-valued variant, it *has* to equal the
    # value in self.spec, if there is one.
    for v, variant in spec.variants.items():
        if sv_variant_conflicts(self.spec, variant):
            return False

    # if there is no intersecting condition for a conditional variant, it can't exist. e.g.:
    #  - cuda_arch=<anything> can't be satisfied when ~cuda.
    #  - generator=<anything> can't be satisfied when build_system=autotools
    def mutually_exclusive(spec, variant_name):
        return all(
            not spec.intersects(when)
            or any(sv_variant_conflicts(spec, wv) for wv in when.variants.values())
            for when, d in self.variants.items()
            if variant_name in d
        )

    names = self.variant_names()
    for v in set(itertools.chain(spec.variants, self.spec.variants)):
        if v not in names:  # treat unknown variants as intersecting
            continue
        if mutually_exclusive(self.spec, v) or mutually_exclusive(spec, v):
            return False

    return True
@property
def virtuals_provided(self):
    """
    Virtual packages provided by this package with its spec.
    """
    result = []
    for when_spec, provided in self.provided.items():
        if self.spec.satisfies(when_spec):
            result.extend(sorted(provided))
    return result
@classmethod
def provided_virtual_names(cls):
    """Return sorted list of names of virtuals that can be provided by this package."""
    names = {vpkg.name for virtuals in cls.provided.values() for vpkg in virtuals}
    return sorted(names)
@property
def prefix(self):
    """Get the prefix into which this package should be installed."""
    # Delegates to the spec, which knows the store's directory layout.
    return self.spec.prefix
@property
def home(self):
    # Alias for ``prefix``; packages may override to point at a sub-prefix.
    return self.prefix
@property
def command(self) -> spack.util.executable.Executable:
    """The package's main executable, looked up under ``home.bin``.

    Raises:
        RuntimeError: when no executable with the spec's name is found there.
    """
    exe_path = os.path.join(self.home.bin, self.spec.name)
    if not fsys.is_exe(exe_path):
        raise RuntimeError(f"Unable to locate {self.spec.name} command in {self.home.bin}")
    return spack.util.executable.Executable(exe_path)
def url_version(self, version):
    """
    Given a version, this returns a string that should be substituted
    into the package's URL to download that version.

    By default, this just returns the version string. Subclasses may need
    to override this, e.g. for boost versions where you need to ensure that
    there are _'s in the download URL.
    """
    return str(version)
def remove_prefix(self):
    """
    Removes the prefix for a package along with any empty parent
    directories.
    """
    # Delegated to the store layout so DB and filesystem stay consistent.
    spack.store.STORE.layout.remove_install_directory(self.spec)
@property
def download_instr(self) -> str:
    """
    Defines the default manual download instructions. Packages can
    override the property to provide more information.

    Returns:
        default manual download instructions
    """
    if self.manual_download:
        required = f"Manual download is required for {self.spec.name}. "
    else:
        required = ""
    return f"{required}Refer to {self.homepage} for download instructions."
def do_fetch(self, mirror_only=False):
    """
    Creates a stage directory and downloads the tarball for this package.
    Working directory will be set to the stage directory.

    Args:
        mirror_only (bool): only look in configured mirrors, never at the
            upstream URL.

    Raises:
        spack.error.FetchError: when the version has no checksum (or is
            deprecated) and the user declines — or cannot be asked — to
            proceed anyway.
    """
    # External or code-less packages have nothing to download.
    if not self.has_code or self.spec.external:
        tty.debug("No fetch required for {0}".format(self.name))
        return

    checksum = spack.config.get("config:checksum")
    # Gate fetching versions we cannot verify: no `versions` entry, not
    # pinned to a git commit, and not a local develop checkout.
    if (
        checksum
        and (self.version not in self.versions)
        and (not isinstance(self.version, GitVersion))
        and ("dev_path" not in self.spec.variants)
    ):
        tty.warn(
            "There is no checksum on file to fetch %s safely."
            % self.spec.cformat("{name}{@version}")
        )

        # Ask the user whether to skip the checksum if we're
        # interactive, but just fail if non-interactive.
        ck_msg = "Add a checksum or use --no-checksum to skip this check."
        ignore_checksum = False
        if sys.stdout.isatty():
            ignore_checksum = tty.get_yes_or_no(" Fetch anyway?", default=False)

            if ignore_checksum:
                tty.debug("Fetching with no checksum. {0}".format(ck_msg))

        if not ignore_checksum:
            raise spack.error.FetchError(
                "Will not fetch %s" % self.spec.format("{name}{@version}"), ck_msg
            )

    deprecated = spack.config.get("config:deprecated")
    # Same interactive gate for deprecated versions.
    if not deprecated and self.versions.get(self.version, {}).get("deprecated", False):
        tty.warn(
            "{0} is deprecated and may be removed in a future Spack "
            "release.".format(self.spec.format("{name}{@version}"))
        )

        # Ask the user whether to install deprecated version if we're
        # interactive, but just fail if non-interactive.
        dp_msg = (
            "If you are willing to be a maintainer for this version "
            "of the package, submit a PR to remove `deprecated=False"
            "`, or use `--deprecated` to skip this check."
        )
        ignore_deprecation = False
        if sys.stdout.isatty():
            ignore_deprecation = tty.get_yes_or_no(" Fetch anyway?", default=False)

            if ignore_deprecation:
                tty.debug("Fetching deprecated version. {0}".format(dp_msg))

        if not ignore_deprecation:
            raise spack.error.FetchError(
                "Will not fetch {0}".format(self.spec.format("{name}{@version}")), dp_msg
            )

    self.stage.create()
    # Manual-download packages get their custom instructions in the error.
    err_msg = None if not self.manual_download else self.download_instr
    start_time = time.time()
    self.stage.fetch(mirror_only, err_msg=err_msg)
    # Record how long fetching took, for timing reports.
    self._fetch_time = time.time() - start_time

    if checksum and self.version in self.versions:
        self.stage.check()

    # Save the archive in the local cache for future re-use.
    self.stage.cache_local()
def do_stage(self, mirror_only=False):
    """Unpacks and expands the fetched tarball.

    Args:
        mirror_only (bool): passed through to :meth:`do_fetch`; only look
            in configured mirrors when fetching.
    """
    # Always create the stage directory at this point. Why? A no-code
    # package may want to use the installation process to install metadata.
    self.stage.create()

    # Fetch/expand any associated code.
    user_dev_path = spack.config.get(f"develop:{self.name}:path", None)
    skip = user_dev_path and os.path.exists(user_dev_path)
    if skip:
        tty.debug("Skipping staging because develop path exists")

    if self.has_code and not self.spec.external and not skip:
        self.do_fetch(mirror_only)
        self.stage.expand_archive()
    else:
        # Support for post-install hooks requires a stage.source_path
        fsys.mkdirp(self.stage.source_path)
def do_patch(self):
    """Applies patches if they haven't been applied already.

    Marker files in the expanded source directory record whether patching
    succeeded, failed, or was unnecessary, so repeated calls are no-ops.

    Raises:
        ValueError: if the spec is not concrete.
        spack.error.SpackError: if a patch fails and the stage requires
            patch success.
    """
    if not self.spec.concrete:
        raise ValueError("Can only patch concrete packages.")

    # Kick off the stage first. This creates the stage.
    self.do_stage()

    # Package can add its own patch function.
    has_patch_fun = hasattr(self, "patch") and callable(self.patch)

    # Get the patches from the spec (this is a shortcut for the MV-variant)
    patches = self.spec.patches

    # If there are no patches, note it.
    if not patches and not has_patch_fun:
        tty.msg("No patches needed for {0}".format(self.name))
        return

    # Construct paths to special files in the archive dir used to
    # keep track of whether patches were successfully applied.
    archive_dir = self.stage.source_path
    good_file = os.path.join(archive_dir, ".spack_patched")
    no_patches_file = os.path.join(archive_dir, ".spack_no_patches")
    bad_file = os.path.join(archive_dir, ".spack_patch_failed")

    # If we encounter an archive that failed to patch, restage it
    # so that we can apply all the patches again.
    if os.path.isfile(bad_file):
        if self.stage.requires_patch_success:
            tty.debug("Patching failed last time. Restaging.")
            self.stage.restage()
        else:
            # develop specs may have patch failures but should never be restaged
            tty.warn(
                f"A patch failure was detected in {self.name}."
                " Build errors may occur due to this."
            )
            return

    # If this file exists, then we already applied all the patches.
    if os.path.isfile(good_file):
        tty.msg("Already patched {0}".format(self.name))
        return
    elif os.path.isfile(no_patches_file):
        tty.msg("No patches needed for {0}".format(self.name))
        return

    errors = []

    # Apply all the patches for specs that match this one
    patched = False
    # Remote patch stages were created in patch-application order by
    # _make_stages(); consume them in lockstep with the UrlPatches below.
    patch_stages = iter(self._patch_stages)
    for patch in patches:
        try:
            with fsys.working_dir(self.stage.source_path):
                # get the path either from the stage where it was fetched, or from the Patch
                if isinstance(patch, spack.patch.UrlPatch):
                    patch_stage = next(patch_stages)
                    patch_path = patch_stage.single_file
                else:
                    patch_path = patch.path
                spack.patch.apply_patch(
                    self.stage.source_path,
                    patch_path,
                    patch.level,
                    patch.working_dir,
                    patch.reverse,
                )
            tty.msg(f"Applied patch {patch.path_or_url}")
            patched = True
        except spack.error.SpackError as e:
            # Touch bad file if anything goes wrong.
            fsys.touch(bad_file)

            error_msg = f"Patch {patch.path_or_url} failed."
            if self.stage.requires_patch_success:
                tty.msg(error_msg)
                raise
            else:
                # best-effort mode (e.g. develop specs): record and continue
                tty.debug(error_msg)
                tty.debug(e)
                errors.append(e)

    if has_patch_fun:
        try:
            with fsys.working_dir(self.stage.source_path):
                self.patch()
            tty.msg("Ran patch() for {0}".format(self.name))
            patched = True
        except spack.multimethod.NoSuchMethodError:
            # We are running a multimethod without a default case.
            # If there's no default it means we don't need to patch.
            if not patched:
                # if we didn't apply a patch from a patch()
                # directive, AND the patch function didn't apply, say
                # no patches are needed. Otherwise, we already
                # printed a message for each patch.
                tty.msg("No patches needed for {0}".format(self.name))
        except spack.error.SpackError as e:
            # Touch bad file if anything goes wrong.
            fsys.touch(bad_file)

            error_msg = f"patch() function failed for {self.name}"
            if self.stage.requires_patch_success:
                tty.msg(error_msg)
                raise
            else:
                tty.debug(error_msg)
                tty.debug(e)
                errors.append(e)

    if not errors:
        # Get rid of any old failed file -- patches have either succeeded
        # or are not needed. This is mostly defensive -- it's needed
        # if we didn't restage
        if os.path.isfile(bad_file):
            os.remove(bad_file)

        # touch good or no patches file so that we skip next time.
        if patched:
            fsys.touch(good_file)
        else:
            fsys.touch(no_patches_file)
@classmethod
def all_patches(cls):
    """Retrieve all patches associated with the package.

    Retrieves patches on the package itself as well as patches on the
    dependencies of the package.
    """
    patches = [patch for patch_list in cls.patches.values() for patch in patch_list]
    for dep_name in cls.dependencies:
        for dependency in cls.dependencies[dep_name].values():
            for patch_list in dependency.patches.values():
                patches.extend(patch_list)
    return patches
def content_hash(self, content: Optional[bytes] = None) -> str:
    """Create a hash based on the artifacts and patches used to build this package.

    This includes:
        * source artifacts (tarballs, repositories) used to build;
        * content hashes (``sha256``'s) of all patches applied by Spack; and
        * canonicalized contents the ``package.py`` recipe used to build.

    This hash is only included in Spack's DAG hash for concrete specs, but if it
    happens to be called on a package with an abstract spec, only applicable (i.e.,
    determinable) portions of the hash will be included.

    Args:
        content: optional canonical recipe source to hash instead of reading
            the package file.

    Returns:
        Lowercase base32-encoded sha256 digest over the sorted components.
    """
    # list of components to make up the hash
    hash_content = []

    # source artifacts/repositories
    # TODO: resources
    if self.spec.versions.concrete:
        try:
            source_id = fs.for_package_version(self).source_id()
        except (fs.ExtrapolationError, fs.InvalidArgsError, spack.error.NoURLError):
            # ExtrapolationError happens if the package has no fetchers defined.
            # InvalidArgsError happens when there are version directives with args,
            # but none of them identifies an actual fetcher.
            # NoURLError happens if the package is external-only with no url
            source_id = None

        if not source_id:
            # TODO? in cases where a digest or source_id isn't available,
            # should this attempt to download the source and set one? This
            # probably only happens for source repositories which are
            # referenced by branch name rather than tag or commit ID.
            from_local_sources = "dev_path" in self.spec.variants

            if self.has_code and not self.spec.external and not from_local_sources:
                message = "Missing a source id for {s.name}@{s.version}"
                tty.debug(message.format(s=self))

            # hash an empty component so the recipe hash still changes shape
            hash_content.append("".encode("utf-8"))
        else:
            hash_content.append(source_id.encode("utf-8"))

    # patch sha256's
    # Only include these if they've been assigned by the concretizer.
    # We check spec._patches_assigned instead of spec.concrete because
    # we have to call package_hash *before* marking specs concrete
    if self.spec._patches_assigned():
        hash_content.extend(
            ":".join((p.sha256, str(p.level))).encode("utf-8") for p in self.spec.patches
        )

    # package.py contents
    hash_content.append(package_hash(self.spec, source=content).encode("utf-8"))

    # put it all together and encode as base32
    b32_hash = base64.b32encode(
        hashlib.sha256(bytes().join(sorted(hash_content))).digest()
    ).lower()
    b32_hash = b32_hash.decode("utf-8")

    return b32_hash
@property
def cmake_prefix_paths(self) -> List[str]:
    """Return a list of paths to be used in CMake's ``CMAKE_PREFIX_PATH``."""
    # Default: just this package's own prefix; packages may override.
    return [self.prefix]
def _has_make_target(self, target):
    """Checks to see if 'target' is a valid target in a Makefile.

    Parameters:
        target (str): the target to check for

    Returns:
        bool: True if 'target' is found, else False
    """
    # Check if we have a Makefile
    for makefile in ["GNUmakefile", "Makefile", "makefile"]:
        if os.path.exists(makefile):
            break
    else:
        tty.debug("No Makefile found in the build directory")
        return False

    # Prevent altering LC_ALL for 'make' outside this function
    make = copy.deepcopy(self.module.make)

    # Use English locale for missing target message comparison
    make.add_default_env("LC_ALL", "C")

    # Check if 'target' is a valid target.
    #
    # `make -n target` performs a "dry run". It prints the commands that
    # would be run but doesn't actually run them. If the target does not
    # exist, you will see one of the following error messages:
    #
    # GNU Make:
    #     make: *** No rule to make target `test'. Stop.
    #     *** No rule to make target 'test'. Stop.
    #
    # BSD Make:
    #     make: don't know how to make test. Stop
    #
    # Note: "Stop." is not printed when running a Make jobserver (spack env depfile) that runs
    # with `make -k/--keep-going`
    missing_target_msgs = [
        "No rule to make target `{0}'.",
        "No rule to make target '{0}'.",
        "don't know how to make {0}.",
    ]

    kwargs = {
        "fail_on_error": False,
        "output": os.devnull,
        "error": str,
        # Remove MAKEFLAGS to avoid inherited flags from Make jobserver (spack env depfile)
        "extra_env": {"MAKEFLAGS": ""},
    }

    stderr = make("-n", target, **kwargs)

    for missing_target_msg in missing_target_msgs:
        if missing_target_msg.format(target) in stderr:
            tty.debug("Target '{0}' not found in {1}".format(target, makefile))
            return False

    return True
def _if_make_target_execute(self, target, *args, **kwargs):
    """Run ``make <target>`` only when the Makefile defines that target.

    Parameters:
        target (str): the target to potentially execute
    """
    if not self._has_make_target(target):
        return
    self.module.make(target, *args, **kwargs)
def _has_ninja_target(self, target):
    """Checks to see if 'target' is a valid target in a Ninja build script.

    Parameters:
        target (str): the target to check for

    Returns:
        bool: True if 'target' is found, else False
    """
    ninja = self.module.ninja

    # Check if we have a Ninja build script
    if not os.path.exists("build.ninja"):
        tty.debug("No Ninja build script found in the build directory")
        return False

    # List every target Ninja knows about
    # (https://ninja-build.org/manual.html#_extra_tools) and look for
    # a line whose name matches exactly.
    listing = ninja("-t", "targets", "all", output=str)
    prefix = target + ":"
    if any(line.startswith(prefix) for line in listing.split("\n")):
        return True

    tty.debug("Target '{0}' not found in build.ninja".format(target))
    return False
def _if_ninja_target_execute(self, target, *args, **kwargs):
    """Run ``ninja <target>`` only when the build script defines that target.

    Parameters:
        target (str): the target to potentially execute
    """
    if not self._has_ninja_target(target):
        return
    self.module.ninja(target, *args, **kwargs)
def _get_needed_resources(self):
# We use intersects here cause it would also work if self.spec is abstract
resources = [
resource
for when_spec, resource_list in self.resources.items()
if self.spec.intersects(when_spec)
for resource in resource_list
]
# Sorts the resources by the length of the string representing their destination. Since any
# nested resource must contain another resource's path, that should work
return sorted(resources, key=lambda res: len(res.destination))
def _resource_stage(self, resource):
pieces = ["resource", resource.name, self.spec.dag_hash()]
resource_stage_folder = "-".join(pieces)
return resource_stage_folder
def do_test(self, *, dirty=False, externals=False, timeout: Optional[int] = None):
    """Run this package's stand-alone tests via its ``PackageTest`` helper.

    Args:
        dirty: keep the user environment when setting up the test environment.
        externals: also test external specs.
        timeout: optional per-test timeout in seconds.
    """
    # Tests requiring a compiler can only run if a language toolchain is in the DAG.
    if self.test_requires_compiler and not any(
        lang in self.spec for lang in ("c", "cxx", "fortran")
    ):
        tty.error(
            f"Skipping tests for package {self.spec}, since a compiler is required, "
            f"but not available"
        )
        return

    kwargs = {
        "dirty": dirty,
        "fake": False,
        "context": "test",
        "externals": externals,
        "verbose": tty.is_verbose(),
    }

    self.tester.stand_alone_tests(kwargs, timeout=timeout)
def unit_test_check(self) -> bool:
"""Hook for unit tests to assert things about package internals.
Unit tests can override this function to perform checks after
``Package.install`` and all post-install hooks run, but before
the database is updated.
The overridden function may indicate that the install procedure
should terminate early (before updating the database) by
returning :data:`False` (or any value such that ``bool(result)`` is
:data:`False`).
Return:
:data:`True` to continue, :data:`False` to skip ``install()``
"""
return True
@classmethod
def inject_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""See :func:`spack.package.inject_flags`."""
return flags, None, None
@classmethod
def env_flags(cls: Type[Pb], name: str, flags: Iterable[str]) -> FLAG_HANDLER_RETURN_TYPE:
"""See :func:`spack.package.env_flags`."""
return None, flags, None
@classmethod
def build_system_flags(
cls: Type[Pb], name: str, flags: Iterable[str]
) -> FLAG_HANDLER_RETURN_TYPE:
"""See :func:`spack.package.build_system_flags`."""
return None, None, flags
def setup_run_environment(self, env: spack.util.environment.EnvironmentModifications) -> None:
"""Sets up the run environment for a package.
Args:
env: environment modifications to be applied when the package is run. Package authors
can call methods on it to alter the run environment.
"""
pass
def setup_dependent_run_environment(
self, env: spack.util.environment.EnvironmentModifications, dependent_spec: spack.spec.Spec
) -> None:
"""Sets up the run environment of packages that depend on this one.
This is similar to ``setup_run_environment``, but it is used to modify the run environment
of a package that *depends* on this one.
This gives packages like Python and others that follow the extension model a way to
implement common environment or run-time settings for dependencies.
Args:
env: environment modifications to be applied when the dependent package is run. Package
authors can call methods on it to alter the build environment.
dependent_spec: The spec of the dependent package about to be run. This allows the
extendee (self) to query the dependent's state. Note that *this* package's spec is
available as ``self.spec``
"""
pass
def setup_dependent_package(self, module, dependent_spec: spack.spec.Spec) -> None:
"""Set up module-scope global variables for dependent packages.
This function is called when setting up the build and run environments of a DAG.
Examples:
1. Extensions often need to invoke the ``python`` interpreter from the Python installation
being extended. This routine can put a ``python`` Executable as a global in the module
scope for the extension package to simplify extension installs.
2. MPI compilers could set some variables in the dependent's scope that point to ``mpicc``,
``mpicxx``, etc., allowing them to be called by common name regardless of which MPI is
used.
Args:
module: The Python ``module`` object of the dependent package. Packages can use this to
set module-scope variables for the dependent to use.
dependent_spec: The spec of the dependent package about to be built. This allows the
extendee (self) to query the dependent's state. Note that *this* package's spec is
available as ``self.spec``.
"""
pass
_flag_handler: Optional[FLAG_HANDLER_TYPE] = None
@property
def flag_handler(self) -> FLAG_HANDLER_TYPE:
if self._flag_handler is None:
self._flag_handler = PackageBase.inject_flags
return self._flag_handler
@flag_handler.setter
def flag_handler(self, var: FLAG_HANDLER_TYPE) -> None:
self._flag_handler = var
# The flag handler method is called for each of the allowed compiler flags.
# It returns a triple of inject_flags, env_flags, build_system_flags.
# The flags returned as inject_flags are injected through the spack
# compiler wrappers.
# The flags returned as env_flags are passed to the build system through
# the environment variables of the same name.
# The flags returned as build_system_flags are passed to the build system
# package subclass to be turned into the appropriate part of the standard
# arguments. This is implemented for build system classes where
# appropriate and will otherwise raise a NotImplementedError.
def flags_to_build_system_args(self, flags: Dict[str, List[str]]) -> None:
# Takes flags as a dict name: list of values
if any(v for v in flags.values()):
msg = "The {0} build system".format(self.__class__.__name__)
msg += " cannot take command line arguments for compiler flags"
raise NotImplementedError(msg)
@staticmethod
def uninstall_by_spec(spec, force=False, deprecator=None):
if not os.path.isdir(spec.prefix):
# prefix may not exist, but DB may be inconsistent. Try to fix by
# removing, but omit hooks.
specs = spack.store.STORE.db.query(spec, installed=True)
if specs:
if deprecator:
spack.store.STORE.db.deprecate(specs[0], deprecator)
tty.debug("Deprecating stale DB entry for {0}".format(spec.short_spec))
else:
spack.store.STORE.db.remove(specs[0])
tty.debug("Removed stale DB entry for {0}".format(spec.short_spec))
return
else:
raise InstallError(str(spec) + " is not installed.")
if not force:
dependents = spack.store.STORE.db.installed_relatives(
spec, direction="parents", transitive=True, deptype=("link", "run")
)
if dependents:
raise PackageStillNeededError(spec, dependents)
# Try to get the package for the spec
try:
pkg = spec.package
except spack.repo.UnknownEntityError:
pkg = None
# Pre-uninstall hook runs first.
with spack.store.STORE.prefix_locker.write_lock(spec):
if pkg is not None:
try:
spack.hooks.pre_uninstall(spec)
except Exception as error:
if force:
error_msg = (
"One or more pre_uninstall hooks have failed"
" for {0}, but Spack is continuing with the"
" uninstall".format(str(spec))
)
if isinstance(error, spack.error.SpackError):
error_msg += "\n\nError message: {0}".format(str(error))
tty.warn(error_msg)
# Note that if the uninstall succeeds then we won't be
# seeing this error again and won't have another chance
# to run the hook.
else:
raise
# Uninstalling in Spack only requires removing the prefix.
if not spec.external:
msg = "Deleting package prefix [{0}]"
tty.debug(msg.format(spec.short_spec))
# test if spec is already deprecated, not whether we want to
# deprecate it now
deprecated = bool(spack.store.STORE.db.deprecator(spec))
spack.store.STORE.layout.remove_install_directory(spec, deprecated)
# Delete DB entry
if deprecator:
msg = "deprecating DB entry [{0}] in favor of [{1}]"
tty.debug(msg.format(spec.short_spec, deprecator.short_spec))
spack.store.STORE.db.deprecate(spec, deprecator)
else:
msg = "Deleting DB entry [{0}]"
tty.debug(msg.format(spec.short_spec))
spack.store.STORE.db.remove(spec)
if pkg is not None:
try:
spack.hooks.post_uninstall(spec)
except Exception:
# If there is a failure here, this is our only chance to do
# something about it: at this point the Spec has been removed
# from the DB and prefix, so the post-uninstallation hooks
# will not have another chance to run.
error_msg = (
"One or more post-uninstallation hooks failed for"
" {0}, but the prefix has been removed (if it is not"
" external).".format(str(spec))
)
tb_msg = traceback.format_exc()
error_msg += "\n\nThe error:\n\n{0}".format(tb_msg)
tty.warn(error_msg)
tty.msg("Successfully uninstalled {0}".format(spec.short_spec))
def do_uninstall(self, force=False):
"""Uninstall this package by spec."""
# delegate to instance-less method.
PackageBase.uninstall_by_spec(self.spec, force)
def view(self):
"""Create a view with the prefix of this package as the root.
Extensions added to this view will modify the installation prefix of
this package.
"""
return YamlFilesystemView(self.prefix, spack.store.STORE.layout)
def do_restage(self):
"""Reverts expanded/checked out source to a pristine state."""
self.stage.restage()
def do_clean(self):
"""Removes the package's build stage and source tarball."""
self.stage.destroy()
@classmethod
def format_doc(cls, **kwargs):
"""Wrap doc string at 72 characters and format nicely"""
indent = kwargs.get("indent", 0)
if not cls.__doc__:
return ""
doc = re.sub(r"\s+", " ", cls.__doc__)
lines = textwrap.wrap(doc, 72)
results = io.StringIO()
for line in lines:
results.write((" " * indent) + line + "\n")
return results.getvalue()
@property
def all_urls(self) -> List[str]:
"""A list of all URLs in a package.
Check both class-level and version-specific URLs.
Returns a list of URLs
"""
urls: List[str] = []
if hasattr(self, "url") and self.url:
urls.append(self.url)
# fetch from first entry in urls to save time
if hasattr(self, "urls") and self.urls:
urls.append(self.urls[0])
for args in self.versions.values():
if "url" in args:
urls.append(args["url"])
return urls
def fetch_remote_versions(
self, concurrency: Optional[int] = None
) -> Dict[StandardVersion, str]:
"""Find remote versions of this package.
Uses :attr:`list_url` and any other URLs listed in the package file.
Returns:
a dictionary mapping versions to URLs
"""
if not self.all_urls:
return {}
try:
return spack.url.find_versions_of_archive(
self.all_urls, self.list_url, self.list_depth, concurrency, reference_package=self
)
except spack.util.web.NoNetworkConnectionError as e:
tty.die("Package.fetch_versions couldn't connect to:", e.url, e.message)
@property
def rpath(self):
"""Get the rpath this package links with, as a list of paths."""
deps = self.spec.dependencies(deptype="link")
# on Windows, libraries of runtime interest are typically
# stored in the bin directory
# Do not include Windows system libraries in the rpath interface
# these libraries are handled automatically by VS/VCVARS and adding
# Spack derived system libs into the link path or address space of a program
# can result in conflicting versions, which makes Spack packages less useable
if sys.platform == "win32":
rpaths = [self.prefix.bin]
rpaths.extend(
d.prefix.bin
for d in deps
if os.path.isdir(d.prefix.bin)
and "windows-system" not in getattr(d.package, "tags", [])
)
else:
rpaths = [self.prefix.lib, self.prefix.lib64]
rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))
return rpaths
@property
def rpath_args(self):
"""
Get the rpath args as a string, with -Wl,-rpath, for each element
"""
return " ".join("-Wl,-rpath,%s" % p for p in self.rpath)
| PackageBase |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 29753,
"end": 30153
} | class ____(BaseDataset):
"""
Feature: Datasets created with LZF compression
"""
def test_szip(self):
""" Create with explicit szip """
dset = self.f.create_dataset(make_name(), (20, 30), compression='szip',
compression_opts=('ec', 16))
@ut.skipIf('shuffle' not in h5py.filters.encode, "SHUFFLE is not installed")
| TestCreateSZIP |
python | facelessuser__pymdown-extensions | pymdownx/details.py | {
"start": 6341,
"end": 6728
} | class ____(Extension):
"""Add Details extension."""
def extendMarkdown(self, md):
"""Add Details to Markdown instance."""
md.registerExtension(self)
md.parser.blockprocessors.register(DetailsProcessor(md.parser), "details", 105)
def makeExtension(*args, **kwargs):
"""Return extension."""
return DetailsExtension(*args, **kwargs)
| DetailsExtension |
python | huggingface__transformers | src/transformers/models/granite_speech/feature_extraction_granite_speech.py | {
"start": 1145,
"end": 7395
} | class ____(FeatureExtractionMixin):
model_input_names = ["input_features"]
def __init__(
self,
sampling_rate: int = 16000,
n_fft: int = 512,
win_length: int = 400,
hop_length: int = 160,
n_mels: int = 80,
projector_window_size: int = 15,
projector_downsample_rate: int = 5,
**kwargs,
):
super().__init__(**kwargs)
self.sampling_rate = sampling_rate
self.melspec_kwargs = {
"sample_rate": sampling_rate,
"n_fft": n_fft,
"win_length": win_length,
"hop_length": hop_length,
"n_mels": n_mels,
}
requires_backends(self, ["torchaudio"])
self.mel_filters = torchaudio.transforms.MelSpectrogram(**self.melspec_kwargs)
self.projector_window_size = projector_window_size
self.projector_downsample_rate = projector_downsample_rate
def __call__(
self,
audios: AudioInput,
device: Optional[str] = "cpu",
) -> BatchFeature:
requires_backends(self, ["torchaudio"])
speech_inputs = {}
batched_audio, audio_lengths = self._get_audios_and_audio_lengths(audios)
speech_inputs["input_features"] = self._extract_mel_spectrograms(
batched_audio,
device=device,
)
audio_embed_sizes = self._get_num_audio_features(audio_lengths)
speech_inputs["audio_embed_sizes"] = audio_embed_sizes
# TODO (@alex-jw-brooks): Currently input_features_mask is not
# a great name, because input_features and input_features_mask
# have different shapes (before/after the projector).
#
# We should align this with other multimodal models, e.g,. llava
# and qwen2audio and refactor this to ensure input_feature_mask
# has the same dimensionality as input_features, or compute it in
# the model based on the audio embedding sizes (since we do not
# have an attention mask for the audio features to infer padding from).
speech_inputs["input_features_mask"] = torch.arange(max(audio_embed_sizes)).view(1, -1) < torch.tensor(
audio_embed_sizes
).view(-1, 1)
return BatchFeature(data=speech_inputs)
def _extract_mel_spectrograms(self, audio: "torch.Tensor", device="cpu"):
"""
Compute the Mel features to be passed to the conformer encoder.
"""
requires_backends(self, ["torchaudio"])
if device is not None:
melspec = self.mel_filters.to(device)
audio = audio.to(device)
else:
melspec = self.mel_filters
bsz = audio.shape[0]
with torch.no_grad():
# Compute mel features
mel = melspec(audio.float())
logmel = mel.transpose(-1, -2).clip_(min=1e-10).log10_()
mx = logmel.amax(dim=(-2, -1), keepdim=True)
logmel = torch.maximum(logmel, mx - 8.0).div_(4).add_(1)
# remove last frame if odd
if logmel.shape[1] % 2 == 1:
logmel = logmel[:, :-1]
# stacking and skipping by 2
audio = logmel.reshape(bsz, -1, 2 * logmel.shape[-1])
return audio
def _get_num_audio_features(self, audio_lengths: Sequence[int]) -> Sequence[int]:
"""
Gets the (variable length) number of features (i.e., projector output) for the sequences
being considered.
Args:
audio_lengths (`Sequence[int]`):
Sequence of one or more raw audio lengths.
"""
hop_length = self.melspec_kwargs["hop_length"]
effective_window_size = self.projector_window_size // self.projector_downsample_rate
projector_lengths = []
for raw_length in audio_lengths:
# mel sequence length computation
mel_length = raw_length // hop_length + 1
# encoder frame takes two mel features
encoder_length = mel_length // 2
nblocks = math.ceil(encoder_length / self.projector_window_size)
# projector output length
projector_length = nblocks * effective_window_size
projector_lengths.append(projector_length)
return projector_lengths
def _get_audios_and_audio_lengths(self, audios: AudioInput) -> Sequence["torch.Tensor", Sequence[int]]:
"""
Coerces audio inputs to torch tensors and extracts audio lengths prior to stacking.
Args:
audios (`AudioInput`):
Audio sequence, numpy array, or torch tensor.
"""
requires_backends(self, ["torch"])
# Coerce to PyTorch tensors if we have numpy arrays, since
# currently we have a dependency on torch/torchaudio anyway
if isinstance(audios, np.ndarray):
audios = torch.from_numpy(audios)
elif isinstance(audios, Sequence) and isinstance(audios[0], np.ndarray):
audios = [torch.from_numpy(arr) for arr in audios]
if isinstance(audios, torch.Tensor):
if audios.ndim == 1:
audios = audios.unsqueeze(0)
if not torch.is_floating_point(audios):
raise ValueError("Invalid audio provided. Audio should be a floating point between 0 and 1")
if audios.shape[0] > 1:
logger.warning("Audio samples are already collated; assuming they all have the same length")
lengths = [audios.shape[-1]] * audios.shape[0]
return audios, lengths
elif isinstance(audios, Sequence) and isinstance(audios[0], torch.Tensor):
if not torch.is_floating_point(audios[0]):
raise ValueError("Invalid audio provided. Audio should be a floating point between 0 and 1")
lengths = [audio.shape[-1] for audio in audios]
audios = [audio.squeeze(0) for audio in audios]
audios = torch.nn.utils.rnn.pad_sequence(audios, batch_first=True, padding_value=0.0)
return audios, lengths
raise TypeError("Invalid audio provided. Audio should be a one or more torch tensors or numpy arrays")
__all__ = ["GraniteSpeechFeatureExtractor"]
| GraniteSpeechFeatureExtractor |
python | PrefectHQ__prefect | tests/server/orchestration/test_rules.py | {
"start": 69639,
"end": 81362
} | class ____:
@pytest.mark.parametrize(
"intended_transition",
list(product([*states.StateType], [*states.StateType])),
ids=transition_names,
)
async def test_null_rejects_fizzle_all_prior_rules(
self, session, initialize_orchestration, intended_transition, run_type
):
side_effects = 0
minimal_before_hook = MagicMock()
first_after_hook = MagicMock()
null_rejection_before_hook = MagicMock()
minimal_after_hook = MagicMock()
null_rejection_after_hook = MagicMock()
minimal_cleanup_hook = MagicMock()
null_rejection_cleanup = MagicMock()
class MinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
minimal_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
first_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
minimal_cleanup_hook()
class NullRejectionRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
null_rejection_before_hook()
await self.reject_transition(None, reason="its okay")
async def after_transition(self, initial_state, validated_state, context):
null_rejection_after_hook()
async def cleanup(self, initial_state, validated_state, context):
null_rejection_cleanup()
ctx = await initialize_orchestration(session, run_type, *intended_transition)
async with contextlib.AsyncExitStack() as stack:
# first enter a minimal rule that fires its pre-transition hook
minimal_rule = MinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(minimal_rule)
assert side_effects == 1
minimal_before_hook.assert_called_once()
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_not_called()
null_rejection_before_hook.assert_not_called()
null_rejection_after_hook.assert_not_called()
null_rejection_cleanup.assert_not_called()
# the null rejection rule rejects the transition
null_rejector = NullRejectionRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(null_rejector)
assert side_effects == 1
minimal_before_hook.assert_called_once()
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_not_called()
null_rejection_before_hook.assert_called_once()
null_rejection_after_hook.assert_not_called()
null_rejection_cleanup.assert_not_called()
await ctx.validate_proposed_state()
assert side_effects == 0
assert await minimal_rule.fizzled() is True
assert await null_rejector.invalid() is False
assert await null_rejector.fizzled() is False
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_called_once()
assert ctx.response_status == schemas.responses.SetStateStatus.REJECT
@pytest.mark.parametrize(
"intended_transition",
list(product([*states.StateType], [*states.StateType])),
ids=transition_names,
)
async def test_null_rejects_abort_all_subsequent_rules(
self, session, initialize_orchestration, intended_transition, run_type
):
side_effects = 0
minimal_before_hook = MagicMock()
null_rejection_before_hook = MagicMock()
minimal_after_hook = MagicMock()
first_after_hook = MagicMock()
null_rejection_after_hook = MagicMock()
minimal_cleanup_hook = MagicMock()
null_rejection_cleanup = MagicMock()
class MinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
minimal_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
first_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
minimal_cleanup_hook()
class NullRejectionRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
null_rejection_before_hook()
await self.reject_transition(None, reason="its okay")
async def after_transition(self, initial_state, validated_state, context):
null_rejection_after_hook()
async def cleanup(self, initial_state, validated_state, context):
null_rejection_cleanup()
ctx = await initialize_orchestration(session, run_type, *intended_transition)
async with contextlib.AsyncExitStack() as stack:
# the null rejection rule rejects the transition
null_rejector = NullRejectionRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(null_rejector)
assert side_effects == 0
null_rejection_before_hook.assert_called_once()
null_rejection_after_hook.assert_not_called()
null_rejection_cleanup.assert_not_called()
minimal_before_hook.assert_not_called()
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_not_called()
# first enter a minimal rule that fires its pre-transition hook
minimal_rule = MinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(minimal_rule)
assert side_effects == 0
null_rejection_before_hook.assert_called_once()
null_rejection_after_hook.assert_not_called()
null_rejection_cleanup.assert_not_called()
minimal_before_hook.assert_not_called()
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_not_called()
await ctx.validate_proposed_state()
assert side_effects == 0
assert await minimal_rule.invalid() is True
assert await null_rejector.invalid() is False
assert await null_rejector.fizzled() is False
minimal_after_hook.assert_not_called()
minimal_cleanup_hook.assert_not_called()
assert ctx.response_status == schemas.responses.SetStateStatus.REJECT
@pytest.mark.parametrize(
"proposed_state_type",
list(states.StateType),
)
async def test_cannot_null_reject_runs_with_no_state(
self, session, run_type, proposed_state_type, initialize_orchestration
):
class NullRejectionRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
await self.reject_transition(None, reason="its okay")
initial_state_type = None
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(session, run_type, *intended_transition)
async with contextlib.AsyncExitStack() as stack:
null_rejector = NullRejectionRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(null_rejector)
await ctx.validate_proposed_state()
assert isinstance(ctx.orchestration_error, OrchestrationError)
assert ctx.response_status == SetStateStatus.ABORT
async def test_context_will_not_write_new_state_with_null_reject(
self, session, run_type, initialize_orchestration
):
class NullRejectionRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
await self.reject_transition(None, reason="its okay")
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(session, run_type, *intended_transition)
async with contextlib.AsyncExitStack() as stack:
reject_no_write = NullRejectionRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(reject_no_write)
intial_state = ctx.run.state
await ctx.validate_proposed_state()
assert ctx.proposed_state is None
assert ctx.validated_state == states.State.model_validate(
intial_state, from_attributes=True
)
assert ctx.response_status == schemas.responses.SetStateStatus.REJECT
async def test_rules_that_reject_state_with_null_do_not_fizzle_themselves(
self, session, task_run, run_type, initialize_orchestration
):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class NullRejectionRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself
# upon exiting
before_transition_hook()
# `BaseOrchestrationRule` provides hooks designed to mutate the proposed state
await self.reject_transition(None, reason="for testing, of course")
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(session, run_type, *intended_transition)
null_rejector = NullRejectionRule(ctx, *intended_transition)
async with null_rejector as ctx:
pass
assert await null_rejector.invalid() is False
assert await null_rejector.fizzled() is False
# despite the mutation, this rule is valid so before and after hooks will fire
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 1
assert cleanup_step.call_count == 0
| TestNullRejection |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 4993,
"end": 6057
} | class ____:
validator = attrib()
def __call__(self, inst, attr, value):
if value is None:
return
self.validator(inst, attr, value)
def __repr__(self):
return f"<optional validator for {self.validator!r} or None>"
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to `None` in addition to satisfying the requirements of
the sub-validator.
Args:
validator
(typing.Callable | tuple[typing.Callable] | list[typing.Callable]):
A validator (or validators) that is used for non-`None` values.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
.. versionchanged:: 23.1.0 *validator* can also be a tuple of validators.
"""
if isinstance(validator, (list, tuple)):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator)
@attrs(repr=False, slots=True, unsafe_hash=True)
| _OptionalValidator |
python | huggingface__transformers | src/transformers/models/levit/modeling_levit.py | {
"start": 18150,
"end": 18387
} | class ____(PreTrainedModel):
config: LevitConfig
base_model_prefix = "levit"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["LevitResidualLayer"]
@auto_docstring
| LevitPreTrainedModel |
python | pytorch__pytorch | torch/distributed/elastic/multiprocessing/errors/error_handler.py | {
"start": 451,
"end": 6675
} | class ____:
"""
Write the provided exception object along with some other metadata about
the error in a structured way in JSON format to an error file specified by the
environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment
variable is not set, then simply logs the contents of what would have been
written to the error file.
This handler may be subclassed to customize the handling of the error.
Subclasses should override ``initialize()`` and ``record_exception()``.
"""
def _get_error_file_path(self) -> str | None:
"""
Return the error file path.
May return ``None`` to have the structured error be logged only.
"""
return os.environ.get("TORCHELASTIC_ERROR_FILE", None)
def initialize(self) -> None:
"""
Call prior to running code that we wish to capture errors/exceptions.
Typically registers signal/fault handlers. Users can override this
function to add custom initialization/registrations that aid in
propagation/information of errors/signals/exceptions/faults.
"""
try:
faulthandler.enable(all_threads=True)
except Exception as e:
warnings.warn(
f"Unable to enable fault handler. {type(e).__name__}: {e}", stacklevel=2
)
def _write_error_file(self, file_path: str, error_msg: str) -> None:
"""Write error message to the file."""
try:
with open(file_path, "w") as fp:
fp.write(error_msg)
except Exception as e:
warnings.warn(
f"Unable to write error to file. {type(e).__name__}: {e}", stacklevel=2
)
def record_exception(self, e: BaseException) -> None:
"""
Write a structured information about the exception into an error file in JSON format.
If the error file cannot be determined, then logs the content
that would have been written to the error file.
"""
file = self._get_error_file_path()
if file:
data = {
"message": {
"message": f"{type(e).__name__}: {e}",
"extraInfo": {
"py_callstack": traceback.format_exc(),
"timestamp": str(int(time.time())),
},
}
}
with open(file, "w") as fp:
json.dump(data, fp)
def override_error_code_in_rootcause_data(
self,
rootcause_error_file: str,
rootcause_error: dict[str, Any],
error_code: int = 0,
):
"""Modify the rootcause_error read from the file, to correctly set the exit code."""
if "message" not in rootcause_error:
logger.warning(
"child error file (%s) does not have field `message`. \n"
"cannot override error code: %s",
rootcause_error_file,
error_code,
)
elif isinstance(rootcause_error["message"], str):
logger.warning(
"child error file (%s) has a new message format. \n"
"skipping error code override",
rootcause_error_file,
)
else:
rootcause_error["message"]["errorCode"] = error_code
def dump_error_file(self, rootcause_error_file: str, error_code: int = 0):
"""Dump parent error file from child process's root cause error and error code."""
with open(rootcause_error_file) as fp:
rootcause_error = json.load(fp)
# Override error code since the child process cannot capture the error code if it
# is terminated by signals like SIGSEGV.
if error_code:
self.override_error_code_in_rootcause_data(
rootcause_error_file, rootcause_error, error_code
)
logger.debug(
"child error file (%s) contents:\n%s",
rootcause_error_file,
json.dumps(rootcause_error, indent=2),
)
my_error_file = self._get_error_file_path()
if my_error_file:
# Guard against existing error files
# This can happen when the child is created using multiprocessing
# and the same env var (TORCHELASTIC_ERROR_FILE) is used on the
# parent and child to specify the error files (respectively)
# because the env vars on the child is set in the wrapper function
# and by default the child inherits the parent's env vars, if the child
# process receives a signal before the wrapper function kicks in
# and the signal handler writes to the error file, then the child
# will write to the parent's error file. In this case just log the
# original error file contents and overwrite the error file.
self._rm(my_error_file)
self._write_error_file(my_error_file, json.dumps(rootcause_error))
logger.info("dumped error file to parent's %s", my_error_file)
else:
logger.error(
"no error file defined for parent, to copy child error file (%s)",
rootcause_error_file,
)
def _rm(self, my_error_file):
if os.path.isfile(my_error_file):
# Log the contents of the original file.
with open(my_error_file) as fp:
try:
original = json.dumps(json.load(fp), indent=2)
logger.warning(
"%s already exists"
" and will be overwritten."
" Original contents:\n%s",
my_error_file,
original,
)
except json.decoder.JSONDecodeError:
logger.warning(
"%s already exists"
" and will be overwritten."
" Unable to load original contents:\n",
my_error_file,
)
os.remove(my_error_file)
| ErrorHandler |
python | getsentry__sentry | src/sentry/release_health/metrics_sessions_v2.py | {
"start": 3321,
"end": 3657
} | class ____(TypedDict):
series: dict[SessionsQueryFunction, list[SessionsQueryValue]]
totals: dict[SessionsQueryFunction, SessionsQueryValue]
def default_for(field: SessionsQueryFunction) -> SessionsQueryValue:
return 0 if field in ("sum(session)", "count_unique(user)") else None
GroupedData = Mapping[GroupKey, Any]
| Group |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 99576,
"end": 100831
} | class ____:
engine: T_NetcdfEngine | None
def test_roundtrip_via_memoryview(self) -> None:
original = create_test_data()
result = original.to_netcdf(engine=self.engine)
roundtrip = load_dataset(result, engine=self.engine)
assert_identical(roundtrip, original)
def test_roundtrip_via_bytes(self) -> None:
original = create_test_data()
result = bytes(original.to_netcdf(engine=self.engine))
roundtrip = load_dataset(result, engine=self.engine)
assert_identical(roundtrip, original)
def test_pickle_open_dataset_from_bytes(self) -> None:
original = Dataset({"foo": ("x", [1, 2, 3])})
netcdf_bytes = bytes(original.to_netcdf(engine=self.engine))
with open_dataset(netcdf_bytes, engine=self.engine) as roundtrip:
with pickle.loads(pickle.dumps(roundtrip)) as unpickled:
assert_identical(unpickled, original)
def test_compute_false(self) -> None:
original = create_test_data()
with pytest.raises(
NotImplementedError,
match=re.escape("to_netcdf() with compute=False is not yet implemented"),
):
original.to_netcdf(engine=self.engine, compute=False)
| InMemoryNetCDF |
python | coleifer__peewee | tests/regressions.py | {
"start": 25475,
"end": 26961
} | class ____(ModelTestCase):
requires = [Product, Sku]
def test_fk_composite_pk_regression(self):
Product.insert_many([
(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'green'),
(3, 'white')]).execute()
Sku.insert_many([
('1-red', 1, 'red'),
('1-blue', 1, 'blue'),
('2-red', 2, 'red'),
('2-green', 2, 'green'),
('3-white', 3, 'white')]).execute()
query = (Product
.select(Product, Sku)
.join(Sku, on=Sku.product)
.where(Product.color == 'red')
.order_by(Product.id, Product.color))
with self.assertQueryCount(1):
rows = [(p.id, p.color, p.sku.upc) for p in query]
self.assertEqual(rows, [
('1', 'red', '1-red'),
('2', 'red', '2-red')])
query = (Sku
.select(Sku, Product)
.join(Product, on=Sku.product)
.where(Product.color != 'red')
.order_by(Sku.upc))
with self.assertQueryCount(1):
rows = [(s.upc, s.product_id, s.color,
s.product.id, s.product.color) for s in query]
self.assertEqual(rows, [
('1-blue', '1', 'blue', '1', 'blue'),
('2-green', '2', 'green', '2', 'green'),
('3-white', '3', 'white', '3', 'white')])
| TestFKCompositePK |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/exceptions.py | {
"start": 1179,
"end": 1511
} | class ____(Exception):
"""Exception used to exit early while tuning."""
def _augment_message(exception: BaseException, pattern: str, new_message: str) -> None:
exception.args = tuple(
new_message if isinstance(arg, str) and re.match(pattern, arg, re.DOTALL) else arg for arg in exception.args
)
| _TunerExitException |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 9268,
"end": 12574
} | class ____:
"""
Tests for `_add_repr`.
"""
def test_repr(self, slots):
"""
If `repr` is False, ignore that attribute.
"""
C = make_class(
"C", {"a": attr.ib(repr=False), "b": attr.ib()}, slots=slots
)
assert "C(b=2)" == repr(C(1, 2))
@pytest.mark.parametrize("cls", [ReprC, ReprCSlots])
def test_repr_works(self, cls):
"""
repr returns a sensible value.
"""
assert "C(a=1, b=2)" == repr(cls(1, 2))
def test_custom_repr_works(self):
"""
repr returns a sensible value for attributes with a custom repr
callable.
"""
def custom_repr(value):
return "foo:" + str(value)
@attr.s
class C:
a = attr.ib(repr=custom_repr)
assert "C(a=foo:1)" == repr(C(1))
def test_infinite_recursion(self):
"""
In the presence of a cyclic graph, repr will emit an ellipsis and not
raise an exception.
"""
@attr.s
class Cycle:
value = attr.ib(default=7)
cycle = attr.ib(default=None)
cycle = Cycle()
cycle.cycle = cycle
assert "Cycle(value=7, cycle=...)" == repr(cycle)
def test_infinite_recursion_long_cycle(self):
"""
A cyclic graph can pass through other non-attrs objects, and repr will
still emit an ellipsis and not raise an exception.
"""
@attr.s
class LongCycle:
value = attr.ib(default=14)
cycle = attr.ib(default=None)
cycle = LongCycle()
# Ensure that the reference cycle passes through a non-attrs object.
# This demonstrates the need for a thread-local "global" ID tracker.
cycle.cycle = {"cycle": [cycle]}
assert "LongCycle(value=14, cycle={'cycle': [...]})" == repr(cycle)
def test_underscores(self):
"""
repr does not strip underscores.
"""
class C:
__attrs_attrs__ = [simple_attr("_x")]
C = _add_repr(C)
i = C()
i._x = 42
assert "C(_x=42)" == repr(i)
def test_repr_uninitialized_member(self):
"""
repr signals unset attributes
"""
C = make_class("C", {"a": attr.ib(init=False)})
assert "C(a=NOTHING)" == repr(C())
@given(add_str=booleans(), slots=booleans())
def test_str(self, add_str, slots):
"""
If str is True, it returns the same as repr.
This only makes sense when subclassing a class with an poor __str__
(like Exceptions).
"""
@attr.s(str=add_str, slots=slots)
class Error(Exception):
x = attr.ib()
e = Error(42)
assert (str(e) == repr(e)) is add_str
def test_str_no_repr(self):
"""
Raises a ValueError if repr=False and str=True.
"""
with pytest.raises(ValueError) as e:
simple_class(repr=False, str=True)
assert (
"__str__ can only be generated if a __repr__ exists."
) == e.value.args[0]
# these are for use in TestAddHash.test_cache_hash_serialization
# they need to be out here so they can be un-pickled
@attr.attrs(unsafe_hash=True, cache_hash=False)
| TestAddRepr |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 2418,
"end": 2536
} | class ____(NamedTuple):
url: URL
method: str
headers: "CIMultiDictProxy[str]"
real_url: URL
| _RequestInfo |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/detector/base.py | {
"start": 1237,
"end": 2643
} | class ____:
issue_title: str
subtitle: str
evidence_data: Mapping[str, Any] = dataclasses.field(default_factory=dict)
evidence_display: Sequence[IssueEvidence] = dataclasses.field(default_factory=list)
type: type[GroupType]
level: str
culprit: str
resource_id: str | None = None
assignee: Actor | None = None
priority: DetectorPriorityLevel | None = None
detection_time: datetime | None = None
def to_issue_occurrence(
self,
*,
occurrence_id: str,
project_id: int,
status: DetectorPriorityLevel,
additional_evidence_data: Mapping[str, Any],
fingerprint: list[str],
) -> IssueOccurrence:
return IssueOccurrence(
id=occurrence_id,
project_id=project_id,
event_id=occurrence_id,
fingerprint=fingerprint,
issue_title=self.issue_title,
subtitle=self.subtitle,
resource_id=self.resource_id,
evidence_data={**self.evidence_data, **additional_evidence_data},
evidence_display=self.evidence_display,
type=self.type,
detection_time=self.detection_time or timezone.now(),
level=self.level,
culprit=self.culprit,
priority=self.priority or status,
assignee=self.assignee,
)
@dataclass(frozen=True)
| DetectorOccurrence |
python | huggingface__transformers | src/transformers/models/video_llama_3/modular_video_llama_3.py | {
"start": 8186,
"end": 9598
} | class ____(VisionRotaryEmbedding):
def forward(self, grid_thw, merge_sizes) -> tuple[torch.Tensor, torch.Tensor]:
pos_ids = []
for (t, h, w), merge_size in zip(grid_thw, merge_sizes):
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(
h // merge_size,
merge_size,
w // merge_size,
merge_size,
)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(
h // merge_size,
merge_size,
w // merge_size,
merge_size,
)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0)
max_grid_thw = grid_thw[:, 1:].max()
seq = torch.arange(max_grid_thw, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
return (emb.cos(), emb.sin())
| VideoLlama3VisionRotaryEmbedding |
python | huggingface__transformers | src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py | {
"start": 6836,
"end": 44438
} | class ____(TokenizersBackend):
r"""
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original LayoutLMv2).
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = None
@staticmethod
def _load_vocab_from_file(vocab_file):
"""Load vocab from a BERT-style vocab file (one token per line)."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
for index, line in enumerate(reader):
token = line.rstrip("\n")
vocab[token] = index
return vocab
def __init__(
self,
vocab=None,
vocab_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_label=-100,
only_label_first_subword=True,
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
self.vocab_file = vocab_file
self.do_lower_case = do_lower_case
# Build vocab for WordPiece
if vocab is not None:
if isinstance(vocab, dict):
_vocab = vocab
else:
raise ValueError("vocab must be a dict mapping tokens to ids")
elif vocab_file is not None:
# Load vocab from file (BERT format: one token per line)
_vocab = self._load_vocab_from_file(vocab_file)
else:
# Initialize with at least the special tokens for WordPiece
_vocab = {
str(pad_token): 0,
str(unk_token): 1,
str(cls_token): 2,
str(sep_token): 3,
str(mask_token): 4,
}
# Initialize WordPiece tokenizer
self._tokenizer = Tokenizer(models.WordPiece(vocab=_vocab, unk_token=str(unk_token)))
# Set normalizer
self._tokenizer.normalizer = normalizers.BertNormalizer(
clean_text=True,
handle_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
lowercase=do_lower_case,
)
# Set pre_tokenizer
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
# Set decoder
self._tokenizer.decoder = decoders.WordPiece(prefix="##")
# Set post_processor (will be set after super().__init__ when we have token IDs)
# Temporarily set to None, will be configured after parent init
self._tokenizer.post_processor = None
tokenizer_object = self._tokenizer
# additional properties
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(
tokenizer_object=tokenizer_object,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
cls_token_box=cls_token_box,
sep_token_box=sep_token_box,
pad_token_box=pad_token_box,
pad_token_label=pad_token_label,
only_label_first_subword=only_label_first_subword,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
# Now set post_processor with actual token IDs
cls = str(self.cls_token)
sep = str(self.sep_token)
cls_token_id = self.cls_token_id
sep_token_id = self.sep_token_id
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls}:0 $A:0 {sep}:0",
pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
special_tokens=[
(cls, cls_token_id),
(sep, sep_token_id),
],
)
# additional properties
self.cls_token_box = cls_token_box
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None,
boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
"""
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# List are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False
if text_pair is not None:
# in case text + text_pair are provided, text = questions, text_pair = words
if not _is_valid_text_input(text):
raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
if not isinstance(text_pair, (list, tuple)):
raise ValueError(
"Words must be of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
)
else:
# in case only text is provided => must be words
if not isinstance(text, (list, tuple)):
raise ValueError(
"Words must be of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
)
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError("You must provide corresponding bounding boxes")
if is_batched:
if len(words) != len(boxes):
raise ValueError("You must provide words and boxes for an equal amount of examples")
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError("You must provide as many words as there are bounding boxes")
else:
if len(words) != len(boxes):
raise ValueError("You must provide as many words as there are bounding boxes")
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
f" {len(text_pair)}."
)
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[Union[list[int], list[list[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
)
return encodings[0].tokens if encodings else []
@add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
boxes=boxes,
text_pair=text_pair,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
list[TextInput],
list[TextInputPair],
list[PreTokenizedInput],
],
is_pair: Optional[bool] = None,
boxes: Optional[list[list[list[int]]]] = None,
word_labels: Optional[list[list[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
# Set the truncation and padding strategy and restore the initial configuration
self.set_truncation_and_padding(
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
)
if is_pair:
batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
)
# Convert encoding to dict
# `Tokens` has type: Tuple[
# List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
# List[EncodingFast]
# ]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens_and_encodings = [
self._convert_encoding(
encoding=encoding,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=True
if word_labels is not None
else return_offsets_mapping, # we use offsets to create the labels
return_length=return_length,
verbose=verbose,
)
for encoding in encodings
]
# Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
# From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
# (we say ~ because the number of overflow varies with the example in the batch)
#
# To match each overflowing sample with the original sample in the batch
# we add an overflow_to_sample_mapping array (see below)
sanitized_tokens = {}
for key in tokens_and_encodings[0][0]:
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks["input_ids"])
sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
for input_ids in sanitized_tokens["input_ids"]:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
# create the token boxes
token_boxes = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
token_boxes_example = []
for id, sequence_id, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_encodings[batch_index].sequence_ids,
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if is_pair and sequence_id == 0:
token_boxes_example.append(self.pad_token_box)
else:
token_boxes_example.append(boxes[original_index][word_id])
else:
if id == self.cls_token_id:
token_boxes_example.append(self.cls_token_box)
elif id == self.sep_token_id:
token_boxes_example.append(self.sep_token_box)
elif id == self.pad_token_id:
token_boxes_example.append(self.pad_token_box)
else:
raise ValueError("Id not recognized")
token_boxes.append(token_boxes_example)
sanitized_tokens["bbox"] = token_boxes
# optionally, create the labels
if word_labels is not None:
labels = []
for batch_index in range(len(sanitized_tokens["input_ids"])):
if return_overflowing_tokens:
original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
else:
original_index = batch_index
labels_example = []
for id, offset, word_id in zip(
sanitized_tokens["input_ids"][batch_index],
sanitized_tokens["offset_mapping"][batch_index],
sanitized_encodings[batch_index].word_ids,
):
if word_id is not None:
if self.only_label_first_subword:
if offset[0] == 0:
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
else:
labels_example.append(word_labels[original_index][word_id])
else:
labels_example.append(self.pad_token_label)
labels.append(labels_example)
sanitized_tokens["labels"] = labels
# finally, remove offsets if the user didn't want them
if not return_offsets_mapping:
del sanitized_tokens["offset_mapping"]
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[list[list[int]]] = None,
word_labels: Optional[list[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# make it a batched input
# 2 options:
# 1) only text, in case text must be a list of str
# 2) text + text_pair, in which case text = str and text_pair a list of str
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(
batched_input,
is_pair=bool(text_pair is not None),
boxes=batched_boxes,
word_labels=batched_word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# Return tensor is None, then we can remove the leading batch axis
# Overflowing tokens are returned as a batch of output so we keep them in this case
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
return batched_output
def _pad(
self,
encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
__all__ = ["LayoutLMv2Tokenizer"]
# Backward alias
LayoutLMv2TokenizerFast = LayoutLMv2Tokenizer
| LayoutLMv2Tokenizer |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 2503,
"end": 3258
} | class ____(Widget):
''' Abstract base class for input widgets.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
title = Either(String, Instance(HTML), default="", help="""
Widget's label.
""")
description = Nullable(Either(String, Instance(Tooltip)), default=None, help="""
Either a plain text or a tooltip with a rich HTML description of the function of this widget.
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# TODO mark this as a one way event from server to client
| InputWidget |
python | realpython__materials | tic-tac-toe-ai-python/source_code_final/tic-tac-toe/frontends/console/players.py | {
"start": 163,
"end": 1005
} | class ____(Player):
def get_move(self, game_state: GameState) -> Move | None:
while not game_state.game_over:
try:
index = grid_to_index(input(f"{self.mark}'s move: ").strip())
except ValueError:
print("Please provide coordinates in the form of A1 or 1A")
else:
try:
return game_state.make_move_to(index)
except InvalidMove:
print("That cell is already occupied.")
return None
def grid_to_index(grid: str) -> int:
if re.match(r"[abcABC][123]", grid):
col, row = grid
elif re.match(r"[123][abcABC]", grid):
row, col = grid
else:
raise ValueError("Invalid grid coordinates")
return 3 * (int(row) - 1) + (ord(col.upper()) - ord("A"))
| ConsolePlayer |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 231331,
"end": 240649
} | class ____(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(os_helper.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(os_helper.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(os_helper.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = os_helper.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(os_helper.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(os_helper.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(TimeoutError, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
time.sleep(1)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(os_helper.TESTFN, encoding="utf-8") as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
| SendfileUsingSendTest |
python | pallets__jinja | src/jinja2/filters.py | {
"start": 34797,
"end": 54599
} | class ____(t.NamedTuple):
grouper: t.Any
list: list[t.Any]
# Use the regular tuple repr to hide this subclass if users print
# out the value during debugging.
def __repr__(self) -> str:
return tuple.__repr__(self)
def __str__(self) -> str:
return tuple.__str__(self)
@pass_environment
def sync_do_groupby(
environment: "Environment",
value: "t.Iterable[V]",
attribute: str | int,
default: t.Any | None = None,
case_sensitive: bool = False,
) -> "list[_GroupTuple]":
"""Group a sequence of objects by an attribute using Python's
:func:`itertools.groupby`. The attribute can use dot notation for
nested access, like ``"address.city"``. Unlike Python's ``groupby``,
the values are sorted first so only one group is returned for each
unique value.
For example, a list of ``User`` objects with a ``city`` attribute
can be rendered in groups. In this example, ``grouper`` refers to
the ``city`` value of the group.
.. sourcecode:: html+jinja
<ul>{% for city, items in users|groupby("city") %}
<li>{{ city }}
<ul>{% for user in items %}
<li>{{ user.name }}
{% endfor %}</ul>
</li>
{% endfor %}</ul>
``groupby`` yields namedtuples of ``(grouper, list)``, which
can be used instead of the tuple unpacking above. ``grouper`` is the
value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
<ul>{% for group in users|groupby("city") %}
<li>{{ group.grouper }}: {{ group.list|join(", ") }}
{% endfor %}</ul>
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
<ul>{% for city, items in users|groupby("city", default="NY") %}
<li>{{ city }}: {{ items|map(attribute="name")|join(", ") }}</li>
{% endfor %}</ul>
Like the :func:`~jinja-filters.sort` filter, sorting and grouping is
case-insensitive by default. The ``key`` for each group will have
the case of the first item in that group of values. For example, if
a list of users has cities ``["CA", "NY", "ca"]``, the "CA" group
will have two values. This can be disabled by passing
``case_sensitive=True``.
.. versionchanged:: 3.1
Added the ``case_sensitive`` parameter. Sorting and grouping is
case-insensitive by default, matching other filters that do
comparisons.
.. versionchanged:: 3.0
Added the ``default`` parameter.
.. versionchanged:: 2.6
The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(
environment,
attribute,
postprocess=ignore_case if not case_sensitive else None,
default=default,
)
out = [
_GroupTuple(key, list(values))
for key, values in groupby(sorted(value, key=expr), expr)
]
if not case_sensitive:
# Return the real key from the first value instead of the lowercase key.
output_expr = make_attrgetter(environment, attribute, default=default)
out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
return out
@async_variant(sync_do_groupby) # type: ignore
async def do_groupby(
environment: "Environment",
value: "t.AsyncIterable[V] | t.Iterable[V]",
attribute: str | int,
default: t.Any | None = None,
case_sensitive: bool = False,
) -> "list[_GroupTuple]":
expr = make_attrgetter(
environment,
attribute,
postprocess=ignore_case if not case_sensitive else None,
default=default,
)
out = [
_GroupTuple(key, await auto_to_list(values))
for key, values in groupby(sorted(await auto_to_list(value), key=expr), expr)
]
if not case_sensitive:
# Return the real key from the first value instead of the lowercase key.
output_expr = make_attrgetter(environment, attribute, default=default)
out = [_GroupTuple(output_expr(values[0]), values) for _, values in out]
return out
@pass_environment
def sync_do_sum(
environment: "Environment",
iterable: "t.Iterable[V]",
attribute: str | int | None = None,
start: V = 0, # type: ignore
) -> V:
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The ``attribute`` parameter was added to allow summing up over
attributes. Also the ``start`` parameter was moved on to the right.
"""
if attribute is not None:
iterable = map(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start) # type: ignore[no-any-return, call-overload]
@async_variant(sync_do_sum) # type: ignore
async def do_sum(
environment: "Environment",
iterable: "t.AsyncIterable[V] | t.Iterable[V]",
attribute: str | int | None = None,
start: V = 0, # type: ignore
) -> V:
rv = start
if attribute is not None:
func = make_attrgetter(environment, attribute)
else:
def func(x: V) -> V:
return x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
def sync_do_list(value: "t.Iterable[V]") -> "list[V]":
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
@async_variant(sync_do_list) # type: ignore
async def do_list(value: "t.AsyncIterable[V] | t.Iterable[V]") -> "list[V]":
return await auto_to_list(value)
def do_mark_safe(value: str) -> Markup:
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value: str) -> str:
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return str(value)
@typing.overload
def do_reverse(value: str) -> str: ...
@typing.overload
def do_reverse(value: "t.Iterable[V]") -> "t.Iterable[V]": ...
def do_reverse(value: str | t.Iterable[V]) -> str | t.Iterable[V]:
"""Reverse the object or return an iterator that iterates over it the other
way round.
"""
if isinstance(value, str):
return value[::-1]
try:
return reversed(value) # type: ignore
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError as e:
raise FilterArgumentError("argument must be iterable") from e
@pass_environment
def do_attr(environment: "Environment", obj: t.Any, name: str) -> Undefined | t.Any:
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo.bar``, but returns undefined instead of falling back to ``foo["bar"]``
if the attribute doesn't exist.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
# Environment.getattr will fall back to obj[name] if obj.name doesn't exist.
# But we want to call env.getattr to get behavior such as sandboxing.
# Determine if the attr exists first, so we know the fallback won't trigger.
try:
# This avoids executing properties/descriptors, but misses __getattr__
# and __getattribute__ dynamic attrs.
getattr_static(obj, name)
except AttributeError:
# This finds dynamic attrs, and we know it's not a descriptor at this point.
if not hasattr(obj, name):
return environment.undefined(obj=obj, name=name)
return environment.getattr(obj, name)
@typing.overload
def sync_do_map(
context: "Context",
value: t.Iterable[t.Any],
name: str,
*args: t.Any,
**kwargs: t.Any,
) -> t.Iterable[t.Any]: ...
@typing.overload
def sync_do_map(
context: "Context",
value: t.Iterable[t.Any],
*,
attribute: str = ...,
default: t.Any | None = None,
) -> t.Iterable[t.Any]: ...
@pass_context
def sync_do_map(
context: "Context", value: t.Iterable[t.Any], *args: t.Any, **kwargs: t.Any
) -> t.Iterable[t.Any]:
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
You can specify a ``default`` value to use if an object in the list
does not have the given attribute.
.. sourcecode:: jinja
{{ users|map(attribute="username", default="Anonymous")|join(", ") }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
Similar to a generator comprehension such as:
.. code-block:: python
(u.username for u in users)
(getattr(u, "username", "Anonymous") for u in users)
(do_lower(x) for x in titles)
.. versionchanged:: 2.11.0
Added the ``default`` parameter.
.. versionadded:: 2.7
"""
if value:
func = prepare_map(context, args, kwargs)
for item in value:
yield func(item)
@typing.overload
def do_map(
context: "Context",
value: t.AsyncIterable[t.Any] | t.Iterable[t.Any],
name: str,
*args: t.Any,
**kwargs: t.Any,
) -> t.Iterable[t.Any]: ...
@typing.overload
def do_map(
context: "Context",
value: t.AsyncIterable[t.Any] | t.Iterable[t.Any],
*,
attribute: str = ...,
default: t.Any | None = None,
) -> t.Iterable[t.Any]: ...
@async_variant(sync_do_map) # type: ignore
async def do_map(
context: "Context",
value: t.AsyncIterable[t.Any] | t.Iterable[t.Any],
*args: t.Any,
**kwargs: t.Any,
) -> t.AsyncIterable[t.Any]:
if value:
func = prepare_map(context, args, kwargs)
async for item in auto_aiter(value):
yield await auto_await(func(item))
@pass_context
def sync_do_select(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and only selecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|select("odd") }}
{{ numbers|select("odd") }}
{{ numbers|select("divisibleby", 3) }}
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if test_odd(n))
(n for n in numbers if test_divisibleby(n, 3))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, False)
@async_variant(sync_do_select) # type: ignore
async def do_select(
context: "Context",
value: "t.AsyncIterable[V] | t.Iterable[V]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, False)
@pass_context
def sync_do_reject(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to each object,
and rejecting the objects with the test succeeding.
If no test is specified, each object will be evaluated as a boolean.
Example usage:
.. sourcecode:: jinja
{{ numbers|reject("odd") }}
Similar to a generator comprehension such as:
.. code-block:: python
(n for n in numbers if not test_odd(n))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@async_variant(sync_do_reject) # type: ignore
async def do_reject(
context: "Context",
value: "t.AsyncIterable[V] | t.Iterable[V]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, False)
@pass_context
def sync_do_selectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and only selecting the objects with the
test succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
Example usage:
.. sourcecode:: jinja
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(user for user in users if user.is_active)
(user for user in users if test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: x, True)
@async_variant(sync_do_selectattr) # type: ignore
async def do_selectattr(
context: "Context",
value: "t.AsyncIterable[V] | t.Iterable[V]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: x, True)
@pass_context
def sync_do_rejectattr(
context: "Context", value: "t.Iterable[V]", *args: t.Any, **kwargs: t.Any
) -> "t.Iterator[V]":
"""Filters a sequence of objects by applying a test to the specified
attribute of each object, and rejecting the objects with the test
succeeding.
If no test is specified, the attribute's value will be evaluated as
a boolean.
.. sourcecode:: jinja
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
Similar to a generator comprehension such as:
.. code-block:: python
(user for user in users if not user.is_active)
(user for user in users if not test_none(user.email))
.. versionadded:: 2.7
"""
return select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@async_variant(sync_do_rejectattr) # type: ignore
async def do_rejectattr(
context: "Context",
value: "t.AsyncIterable[V] | t.Iterable[V]",
*args: t.Any,
**kwargs: t.Any,
) -> "t.AsyncIterator[V]":
return async_select_or_reject(context, value, args, kwargs, lambda x: not x, True)
@pass_eval_context
def do_tojson(
eval_ctx: "EvalContext", value: t.Any, indent: int | None = None
) -> Markup:
"""Serialize an object to a string of JSON, and mark it safe to
render in HTML. This filter is only for use in HTML documents.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param value: The object to serialize to JSON.
:param indent: The ``indent`` parameter passed to ``dumps``, for
pretty-printing the value.
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
dumps = policies["json.dumps_function"]
kwargs = policies["json.dumps_kwargs"]
if indent is not None:
kwargs = kwargs.copy()
kwargs["indent"] = indent
return htmlsafe_json_dumps(value, dumps=dumps, **kwargs)
def prepare_map(
context: "Context", args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
) -> t.Callable[[t.Any], t.Any]:
if not args and "attribute" in kwargs:
attribute = kwargs.pop("attribute")
default = kwargs.pop("default", None)
if kwargs:
raise FilterArgumentError(
f"Unexpected keyword argument {next(iter(kwargs))!r}"
)
func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[0]
args = args[1:]
except LookupError:
raise FilterArgumentError("map requires a filter argument") from None
def func(item: t.Any) -> t.Any:
return context.environment.call_filter(
name, item, args, kwargs, context=context
)
return func
def prepare_select_or_reject(
context: "Context",
args: tuple[t.Any, ...],
kwargs: dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> t.Callable[[t.Any], t.Any]:
if lookup_attr:
try:
attr = args[0]
except LookupError:
raise FilterArgumentError("Missing parameter for attribute name") from None
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
def transfunc(x: V) -> V:
return x
try:
name = args[off]
args = args[1 + off :]
def func(item: t.Any) -> t.Any:
return context.environment.call_test(name, item, args, kwargs, context)
except LookupError:
func = bool # type: ignore
return lambda item: modfunc(func(transfunc(item)))
def select_or_reject(
context: "Context",
value: "t.Iterable[V]",
args: tuple[t.Any, ...],
kwargs: dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.Iterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
for item in value:
if func(item):
yield item
async def async_select_or_reject(
context: "Context",
value: "t.AsyncIterable[V] | t.Iterable[V]",
args: tuple[t.Any, ...],
kwargs: dict[str, t.Any],
modfunc: t.Callable[[t.Any], t.Any],
lookup_attr: bool,
) -> "t.AsyncIterator[V]":
if value:
func = prepare_select_or_reject(context, args, kwargs, modfunc, lookup_attr)
async for item in auto_aiter(value):
if func(item):
yield item
FILTERS = {
"abs": abs,
"attr": do_attr,
"batch": do_batch,
"capitalize": do_capitalize,
"center": do_center,
"count": len,
"d": do_default,
"default": do_default,
"dictsort": do_dictsort,
"e": escape,
"escape": escape,
"filesizeformat": do_filesizeformat,
"first": do_first,
"float": do_float,
"forceescape": do_forceescape,
"format": do_format,
"groupby": do_groupby,
"indent": do_indent,
"int": do_int,
"join": do_join,
"last": do_last,
"length": len,
"list": do_list,
"lower": do_lower,
"items": do_items,
"map": do_map,
"min": do_min,
"max": do_max,
"pprint": do_pprint,
"random": do_random,
"reject": do_reject,
"rejectattr": do_rejectattr,
"replace": do_replace,
"reverse": do_reverse,
"round": do_round,
"safe": do_mark_safe,
"select": do_select,
"selectattr": do_selectattr,
"slice": do_slice,
"sort": do_sort,
"string": soft_str,
"striptags": do_striptags,
"sum": do_sum,
"title": do_title,
"trim": do_trim,
"truncate": do_truncate,
"unique": do_unique,
"upper": do_upper,
"urlencode": do_urlencode,
"urlize": do_urlize,
"wordcount": do_wordcount,
"wordwrap": do_wordwrap,
"xmlattr": do_xmlattr,
"tojson": do_tojson,
}
| _GroupTuple |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 4787,
"end": 6047
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(TASK_STR)
def test_execute(self, task_mock, hook_mock):
op = DataplexCreateTaskOperator(
task_id="create_dataplex_task",
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY,
dataplex_task_id=DATAPLEX_TASK_ID,
validate_only=None,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_operation.return_value = None
task_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.create_task.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY,
dataplex_task_id=DATAPLEX_TASK_ID,
validate_only=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCreateTaskOperator |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/kinesis_analytics.py | {
"start": 1554,
"end": 5199
} | class ____(AwsBaseOperator[KinesisAnalyticsV2Hook]):
"""
Creates an AWS Managed Service for Apache Flink application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:KinesisAnalyticsV2CreateApplicationOperator`
:param application_name: The name of application. (templated)
:param runtime_environment: The runtime environment for the application. (templated)
:param service_execution_role: The IAM role used by the application to access services. (templated)
:param create_application_kwargs: Create application extra properties. (templated)
:param application_description: A summary description of the application. (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = KinesisAnalyticsV2Hook
ui_color = "#44b5e2"
template_fields: Sequence[str] = aws_template_fields(
"application_name",
"runtime_environment",
"service_execution_role",
"create_application_kwargs",
"application_description",
)
template_fields_renderers: ClassVar[dict] = {
"create_application_kwargs": "json",
}
def __init__(
self,
application_name: str,
runtime_environment: str,
service_execution_role: str,
create_application_kwargs: dict[str, Any] | None = None,
application_description: str = "Managed Service for Apache Flink application created from Airflow",
**kwargs,
):
super().__init__(**kwargs)
self.application_name = application_name
self.runtime_environment = runtime_environment
self.service_execution_role = service_execution_role
self.create_application_kwargs = create_application_kwargs or {}
self.application_description = application_description
def execute(self, context: Context) -> dict[str, str]:
self.log.info("Creating AWS Managed Service for Apache Flink application %s.", self.application_name)
try:
response = self.hook.conn.create_application(
ApplicationName=self.application_name,
ApplicationDescription=self.application_description,
RuntimeEnvironment=self.runtime_environment,
ServiceExecutionRole=self.service_execution_role,
**self.create_application_kwargs,
)
except ClientError as error:
raise AirflowException(
f"AWS Managed Service for Apache Flink application creation failed: {error.response['Error']['Message']}"
)
self.log.info(
"AWS Managed Service for Apache Flink application created successfully %s.",
self.application_name,
)
return {"ApplicationARN": response["ApplicationDetail"]["ApplicationARN"]}
| KinesisAnalyticsV2CreateApplicationOperator |
python | pypa__warehouse | tests/unit/admin/views/test_organizations.py | {
"start": 28848,
"end": 33822
} | class ____:
def test_update_role(self, db_request, monkeypatch):
organization = OrganizationFactory.create(name="pypi")
user = UserFactory.create(username="testuser")
role = OrganizationRoleFactory.create(
organization=organization, user=user, role_name=OrganizationRoleType.Member
)
# Mock record_event
record_event = pretend.call_recorder(lambda **kwargs: None)
monkeypatch.setattr(organization, "record_event", record_event)
db_request.matchdict = {
"organization_id": str(organization.id),
"role_id": str(role.id),
}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/organizations/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {"role_name": "Manager"}
result = views.update_organization_role(db_request)
assert isinstance(result, HTTPSeeOther)
assert result.location == "/admin/organizations/"
assert db_request.session.flash.calls == [
pretend.call(
f"Changed '{user.username}' from 'Member' to 'Manager' "
f"in '{organization.name}'",
queue="success",
)
]
db_request.db.refresh(role)
assert role.role_name == OrganizationRoleType.Manager
# Check event was recorded
assert record_event.calls == [
pretend.call(
request=db_request,
tag="admin:organization:role:change",
additional={
"action": f"change {user.username} from Member to Manager",
"user_id": str(user.id),
"old_role_name": "Member",
"new_role_name": "Manager",
},
)
]
def test_update_role_not_found(self, db_request):
organization = OrganizationFactory.create(name="pypi")
db_request.matchdict = {
"organization_id": str(organization.id),
"role_id": "00000000-0000-0000-0000-000000000000",
}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/organizations/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.update_organization_role(db_request)
assert isinstance(result, HTTPSeeOther)
assert db_request.session.flash.calls == [
pretend.call("This role no longer exists", queue="error")
]
def test_update_role_no_role_name(self, db_request):
organization = OrganizationFactory.create(name="pypi")
user = UserFactory.create(username="testuser")
role = OrganizationRoleFactory.create(
organization=organization, user=user, role_name=OrganizationRoleType.Member
)
db_request.matchdict = {
"organization_id": str(organization.id),
"role_id": str(role.id),
}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/organizations/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {}
result = views.update_organization_role(db_request)
assert isinstance(result, HTTPSeeOther)
assert db_request.session.flash.calls == [
pretend.call("Provide a role", queue="error")
]
def test_update_role_same_role(self, db_request):
organization = OrganizationFactory.create(name="pypi")
user = UserFactory.create(username="testuser")
role = OrganizationRoleFactory.create(
organization=organization, user=user, role_name=OrganizationRoleType.Member
)
db_request.matchdict = {
"organization_id": str(organization.id),
"role_id": str(role.id),
}
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/organizations/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {"role_name": "Member"}
result = views.update_organization_role(db_request)
assert isinstance(result, HTTPSeeOther)
assert db_request.session.flash.calls == [
pretend.call("Role is already set to this value", queue="error")
]
def test_update_role_organization_not_found(self, db_request):
db_request.matchdict = {
"organization_id": "00000000-0000-0000-0000-000000000000",
"role_id": "00000000-0000-0000-0000-000000000000",
}
with pytest.raises(HTTPNotFound):
views.update_organization_role(db_request)
| TestUpdateOrganizationRole |
python | doocs__leetcode | solution/1800-1899/1845.Seat Reservation Manager/Solution.py | {
"start": 0,
"end": 386
} | class ____:
def __init__(self, n: int):
self.q = list(range(1, n + 1))
def reserve(self) -> int:
return heappop(self.q)
def unreserve(self, seatNumber: int) -> None:
heappush(self.q, seatNumber)
# Your SeatManager object will be instantiated and called as such:
# obj = SeatManager(n)
# param_1 = obj.reserve()
# obj.unreserve(seatNumber)
| SeatManager |
python | django__django | tests/delete_regress/models.py | {
"start": 397,
"end": 527
} | class ____(models.Model):
award = models.ForeignKey(Award, models.CASCADE)
note = models.CharField(max_length=100)
| AwardNote |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 814,
"end": 963
} | class ____:
"""Multiple __slots__ declared in the class"""
x = 1
if x:
__slots__: str
else:
__slots__ = ("y",)
| EigthGood |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module8.py | {
"start": 67,
"end": 177
} | class ____:
class Inner:
x: int
def NoTypeCheck_function(arg: int) -> int:
...
| NoTypeCheck_Outer |
python | PrefectHQ__prefect | src/prefect/futures.py | {
"start": 953,
"end": 4632
} | class ____(abc.ABC, Generic[R]):
"""
Abstract base class for Prefect futures. A Prefect future is a handle to the
asynchronous execution of a run. It provides methods to wait for the
to complete and to retrieve the result of the run.
"""
def __init__(self, task_run_id: uuid.UUID):
warnings.warn(
"The __init__ method of PrefectFuture is deprecated and will be removed in a future release. "
"If you are subclassing PrefectFuture, please implement the __init__ method in your subclass or "
"subclass PrefectTaskRunFuture instead.",
DeprecationWarning,
)
self._task_run_id = task_run_id
self._final_state: State[R] | None = None
@property
def task_run_id(self) -> uuid.UUID:
"""The ID of the task run associated with this future"""
warnings.warn(
"The task_run_id property of PrefectFuture is deprecated and will be removed in a future release. "
"If you are subclassing PrefectFuture, please implement the task_run_id property in your subclass or "
"subclass PrefectTaskRunFuture instead.",
DeprecationWarning,
)
return self._task_run_id
@property
def state(self) -> State:
"""The current state of the task run associated with this future"""
warnings.warn(
"The state property of PrefectFuture is deprecated and will be removed in a future release. "
"If you are subclassing PrefectFuture, please implement the state property in your subclass or "
"subclass PrefectTaskRunFuture instead.",
DeprecationWarning,
)
if self._final_state:
return self._final_state
client = get_client(sync_client=True)
try:
task_run = client.read_task_run(task_run_id=self.task_run_id)
except ObjectNotFound:
# We'll be optimistic and assume this task will eventually start
# TODO: Consider using task run events to wait for the task to start
return Pending()
return task_run.state or Pending()
@abc.abstractmethod
def wait(self, timeout: float | None = None) -> None:
...
"""
Wait for the task run to complete.
If the task run has already completed, this method will return immediately.
Args:
timeout: The maximum number of seconds to wait for the task run to complete.
If the task run has not completed after the timeout has elapsed, this method will return.
"""
@abc.abstractmethod
def result(
self,
timeout: float | None = None,
raise_on_failure: bool = True,
) -> R:
...
"""
Get the result of the task run associated with this future.
If the task run has not completed, this method will wait for the task run to complete.
Args:
timeout: The maximum number of seconds to wait for the task run to complete.
If the task run has not completed after the timeout has elapsed, this method will return.
raise_on_failure: If `True`, an exception will be raised if the task run fails.
Returns:
The result of the task run.
"""
@abc.abstractmethod
def add_done_callback(self, fn: Callable[["PrefectFuture[R]"], None]) -> None:
"""
Add a callback to be run when the future completes or is cancelled.
Args:
fn: A callable that will be called with this future as its only argument when the future completes or is cancelled.
"""
...
| PrefectFuture |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py | {
"start": 14384,
"end": 15215
} | class ____(GroundingDinoContrastiveEmbedding):
def __init__(self, config):
super().__init__(config)
self.bias = nn.Parameter(torch.tensor(0.0))
def forward(
self,
vision_hidden_state: torch.FloatTensor,
text_hidden_state: torch.FloatTensor,
text_token_mask: torch.BoolTensor,
) -> torch.FloatTensor:
res = vision_hidden_state @ text_hidden_state.transpose(-1, -2)
res = res / math.sqrt(vision_hidden_state.shape[-1])
res = res + self.bias
res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
# padding to max_text_len
new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device)
new_res[..., : res.shape[-1]] = res
return new_res
| MMGroundingDinoContrastiveEmbedding |
python | tensorflow__tensorflow | tensorflow/lite/tools/convert_image_to_csv_test.py | {
"start": 1108,
"end": 3384
} | class ____(test_util.TensorFlowTestCase):
def testGetImageRaisesMissingFile(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "no_such.jpg")
with self.assertRaises(NotFoundError):
_ = convert_image_to_csv.get_image(64, 96, False, image_path)
def testGetImageSizeIsCorrect(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(64, 96, False, image_path)
self.assertEqual((96, 64, 3), image_data.shape)
def testGetImageConvertsToGrayscale(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "medium.jpg")
image_data = convert_image_to_csv.get_image(40, 20, True, image_path)
self.assertEqual((20, 40, 1), image_data.shape)
def testGetImageCanLoadPng(self):
image_path = os.path.join(PREFIX_PATH, "png", "testdata", "lena_rgba.png")
image_data = convert_image_to_csv.get_image(10, 10, False, image_path)
self.assertEqual((10, 10, 3), image_data.shape)
def testGetImageConvertsGrayscaleToColor(self):
image_path = os.path.join(PREFIX_PATH, "png", "testdata", "lena_gray.png")
image_data = convert_image_to_csv.get_image(23, 19, False, image_path)
self.assertEqual((19, 23, 3), image_data.shape)
def testGetImageColorValuesInRange(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(47, 31, False, image_path)
self.assertLessEqual(0, np.min(image_data))
self.assertGreaterEqual(255, np.max(image_data))
def testGetImageGrayscaleValuesInRange(self):
image_path = os.path.join(PREFIX_PATH, "jpeg", "testdata", "small.jpg")
image_data = convert_image_to_csv.get_image(27, 33, True, image_path)
self.assertLessEqual(0, np.min(image_data))
self.assertGreaterEqual(255, np.max(image_data))
def testArrayToIntCsv(self):
csv_string = convert_image_to_csv.array_to_int_csv(
np.array([[1, 2], [3, 4]]))
self.assertEqual("1,2,3,4", csv_string)
def testArrayToIntCsvRounding(self):
csv_string = convert_image_to_csv.array_to_int_csv(
np.array([[1.0, 2.0], [3.0, 4.0]]))
self.assertEqual("1,2,3,4", csv_string)
if __name__ == "__main__":
test.main()
| ConvertImageToCsvTest |
python | FactoryBoy__factory_boy | tests/test_faker.py | {
"start": 599,
"end": 5836
} | class ____(unittest.TestCase):
def setUp(self):
self._real_fakers = factory.Faker._FAKER_REGISTRY
factory.Faker._FAKER_REGISTRY = {}
def tearDown(self):
factory.Faker._FAKER_REGISTRY = self._real_fakers
def _setup_mock_faker(self, locale=None, **definitions):
if locale is None:
locale = factory.Faker._DEFAULT_LOCALE
factory.Faker._FAKER_REGISTRY[locale] = MockFaker(definitions)
def _setup_advanced_mock_faker(self, locale=None, **handlers):
if locale is None:
locale = factory.Faker._DEFAULT_LOCALE
factory.Faker._FAKER_REGISTRY[locale] = AdvancedMockFaker(handlers)
def test_simple_biased(self):
self._setup_mock_faker(name="John Doe")
faker_field = factory.Faker('name')
self.assertEqual("John Doe", faker_field.evaluate(None, None, {'locale': None}))
def test_full_factory(self):
class Profile:
def __init__(self, first_name, last_name, email):
self.first_name = first_name
self.last_name = last_name
self.email = email
class ProfileFactory(factory.Factory):
class Meta:
model = Profile
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name', locale='fr_FR')
email = factory.Faker('email')
self._setup_mock_faker(first_name="John", last_name="Doe", email="john.doe@example.org")
self._setup_mock_faker(first_name="Jean", last_name="Valjean", email="jvaljean@exemple.fr", locale='fr_FR')
profile = ProfileFactory()
self.assertEqual("John", profile.first_name)
self.assertEqual("Valjean", profile.last_name)
self.assertEqual('john.doe@example.org', profile.email)
def test_override_locale(self):
class Profile:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class ProfileFactory(factory.Factory):
class Meta:
model = Profile
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name', locale='fr_FR')
self._setup_mock_faker(first_name="John", last_name="Doe")
self._setup_mock_faker(first_name="Jean", last_name="Valjean", locale='fr_FR')
self._setup_mock_faker(first_name="Johannes", last_name="Brahms", locale='de_DE')
profile = ProfileFactory()
self.assertEqual("John", profile.first_name)
self.assertEqual("Valjean", profile.last_name)
with factory.Faker.override_default_locale('de_DE'):
profile = ProfileFactory()
self.assertEqual("Johannes", profile.first_name)
self.assertEqual("Valjean", profile.last_name)
profile = ProfileFactory()
self.assertEqual("John", profile.first_name)
self.assertEqual("Valjean", profile.last_name)
def test_add_provider(self):
class Face:
def __init__(self, smiley, french_smiley):
self.smiley = smiley
self.french_smiley = french_smiley
class FaceFactory(factory.Factory):
class Meta:
model = Face
smiley = factory.Faker('smiley')
french_smiley = factory.Faker('smiley', locale='fr_FR')
class SmileyProvider(faker.providers.BaseProvider):
def smiley(self):
return ':)'
class FrenchSmileyProvider(faker.providers.BaseProvider):
def smiley(self):
return '(:'
factory.Faker.add_provider(SmileyProvider)
factory.Faker.add_provider(FrenchSmileyProvider, 'fr_FR')
face = FaceFactory()
self.assertEqual(":)", face.smiley)
self.assertEqual("(:", face.french_smiley)
def test_faker_customization(self):
"""Factory declarations in Faker parameters should be accepted."""
Trip = collections.namedtuple('Trip', ['departure', 'transfer', 'arrival'])
may_4th = datetime.date(1977, 5, 4)
may_25th = datetime.date(1977, 5, 25)
october_19th = datetime.date(1977, 10, 19)
class TripFactory(factory.Factory):
class Meta:
model = Trip
departure = may_4th
arrival = may_25th
transfer = factory.Faker(
'date_between_dates',
start_date=factory.SelfAttribute('..departure'),
end_date=factory.SelfAttribute('..arrival'),
)
def fake_select_date(start_date, end_date):
"""Fake date_between_dates."""
# Ensure that dates have been transferred from the factory
# to Faker parameters.
self.assertEqual(start_date, may_4th)
self.assertEqual(end_date, may_25th)
return october_19th
self._setup_advanced_mock_faker(
date_between_dates=fake_select_date,
)
trip = TripFactory()
self.assertEqual(may_4th, trip.departure)
self.assertEqual(october_19th, trip.transfer)
self.assertEqual(may_25th, trip.arrival)
| FakerTests |
python | redis__redis-py | redis/commands/bf/commands.py | {
"start": 13405,
"end": 18862
} | class ____:
def create(self, key, compression=100):
"""
Allocate the memory and initialize the t-digest.
For more information see `TDIGEST.CREATE <https://redis.io/commands/tdigest.create>`_.
""" # noqa
return self.execute_command(TDIGEST_CREATE, key, "COMPRESSION", compression)
def reset(self, key):
"""
Reset the sketch `key` to zero - empty out the sketch and re-initialize it.
For more information see `TDIGEST.RESET <https://redis.io/commands/tdigest.reset>`_.
""" # noqa
return self.execute_command(TDIGEST_RESET, key)
def add(self, key, values):
"""
Adds one or more observations to a t-digest sketch `key`.
For more information see `TDIGEST.ADD <https://redis.io/commands/tdigest.add>`_.
""" # noqa
return self.execute_command(TDIGEST_ADD, key, *values)
def merge(self, destination_key, num_keys, *keys, compression=None, override=False):
"""
Merges all of the values from `keys` to 'destination-key' sketch.
It is mandatory to provide the `num_keys` before passing the input keys and
the other (optional) arguments.
If `destination_key` already exists its values are merged with the input keys.
If you wish to override the destination key contents use the `OVERRIDE` parameter.
For more information see `TDIGEST.MERGE <https://redis.io/commands/tdigest.merge>`_.
""" # noqa
params = [destination_key, num_keys, *keys]
if compression is not None:
params.extend(["COMPRESSION", compression])
if override:
params.append("OVERRIDE")
return self.execute_command(TDIGEST_MERGE, *params)
def min(self, key):
"""
Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty.
For more information see `TDIGEST.MIN <https://redis.io/commands/tdigest.min>`_.
""" # noqa
return self.execute_command(TDIGEST_MIN, key)
def max(self, key):
"""
Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty.
For more information see `TDIGEST.MAX <https://redis.io/commands/tdigest.max>`_.
""" # noqa
return self.execute_command(TDIGEST_MAX, key)
def quantile(self, key, quantile, *quantiles):
"""
Returns estimates of one or more cutoffs such that a specified fraction of the
observations added to this t-digest would be less than or equal to each of the
specified cutoffs. (Multiple quantiles can be returned with one call)
For more information see `TDIGEST.QUANTILE <https://redis.io/commands/tdigest.quantile>`_.
""" # noqa
return self.execute_command(TDIGEST_QUANTILE, key, quantile, *quantiles)
def cdf(self, key, value, *values):
"""
Return double fraction of all points added which are <= value.
For more information see `TDIGEST.CDF <https://redis.io/commands/tdigest.cdf>`_.
""" # noqa
return self.execute_command(TDIGEST_CDF, key, value, *values)
def info(self, key):
"""
Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight
and Total Compressions.
For more information see `TDIGEST.INFO <https://redis.io/commands/tdigest.info>`_.
""" # noqa
return self.execute_command(TDIGEST_INFO, key)
def trimmed_mean(self, key, low_cut_quantile, high_cut_quantile):
"""
Return mean value from the sketch, excluding observation values outside
the low and high cutoff quantiles.
For more information see `TDIGEST.TRIMMED_MEAN <https://redis.io/commands/tdigest.trimmed_mean>`_.
""" # noqa
return self.execute_command(
TDIGEST_TRIMMED_MEAN, key, low_cut_quantile, high_cut_quantile
)
def rank(self, key, value, *values):
"""
Retrieve the estimated rank of value (the number of observations in the sketch
that are smaller than value + half the number of observations that are equal to value).
For more information see `TDIGEST.RANK <https://redis.io/commands/tdigest.rank>`_.
""" # noqa
return self.execute_command(TDIGEST_RANK, key, value, *values)
def revrank(self, key, value, *values):
"""
Retrieve the estimated rank of value (the number of observations in the sketch
that are larger than value + half the number of observations that are equal to value).
For more information see `TDIGEST.REVRANK <https://redis.io/commands/tdigest.revrank>`_.
""" # noqa
return self.execute_command(TDIGEST_REVRANK, key, value, *values)
def byrank(self, key, rank, *ranks):
"""
Retrieve an estimation of the value with the given rank.
For more information see `TDIGEST.BY_RANK <https://redis.io/commands/tdigest.by_rank>`_.
""" # noqa
return self.execute_command(TDIGEST_BYRANK, key, rank, *ranks)
def byrevrank(self, key, rank, *ranks):
"""
Retrieve an estimation of the value with the given reverse rank.
For more information see `TDIGEST.BY_REVRANK <https://redis.io/commands/tdigest.by_revrank>`_.
""" # noqa
return self.execute_command(TDIGEST_BYREVRANK, key, rank, *ranks)
| TDigestCommands |
python | PyCQA__pylint | tests/functional/b/bad_staticmethod_argument.py | {
"start": 64,
"end": 321
} | class ____:
def method1(self): # [bad-staticmethod-argument]
pass
method1 = staticmethod(method1)
def method2(cls): # [bad-staticmethod-argument]
pass
method2 = staticmethod(method2)
def __init__(self):
pass
| Abcd |
python | walkccc__LeetCode | solutions/1734. Decode XORed Permutation/1734.py | {
"start": 0,
"end": 955
} | class ____:
def decode(self, encoded: list[int]) -> list[int]:
# Our goal is to find the value of a1, which will allow us to decode a2, a3,
# ..., an. This can be achieved by performing XOR operation between each
# element in `encoded` and a1.
#
# e.g. n = 3, perm = [a1, a2, a3] is a permutation of [1, 2, 3].
# encoded = [a1^a2, a2^a3]
# accumulatedEncoded = [a1^a2, a1^a3]
# a1 = (a1^a2)^(a1^a3)^(a1^a2^a3)
# a2 = a1^(a1^a2)
# a3 = a2^(a2^a3)
n = len(encoded) + 1
nXors = functools.reduce(operator.xor, [i for i in range(1, n + 1)])
# Instead of constructing the array, we can track of the running XOR value
# of `accumulatedEncoded`.
xors = 0 # xors(accumulatedEncoded)
for encode in encoded:
runningXors ^= encode
xors ^= runningXors
ans = [xors ^ nXors]
for encode in encoded:
ans.append(ans[-1] ^ encode)
return ans
| Solution |
python | celery__celery | celery/app/utils.py | {
"start": 8914,
"end": 13171
} | class ____:
"""Old application pickler/unpickler (< 3.1)."""
def __call__(self, cls, *args):
kwargs = self.build_kwargs(*args)
app = self.construct(cls, **kwargs)
self.prepare(app, **kwargs)
return app
def prepare(self, app, **kwargs):
app.conf.update(kwargs['changes'])
def build_kwargs(self, *args):
return self.build_standard_kwargs(*args)
def build_standard_kwargs(self, main, changes, loader, backend, amqp,
events, log, control, accept_magic_kwargs,
config_source=None):
return {'main': main, 'loader': loader, 'backend': backend,
'amqp': amqp, 'changes': changes, 'events': events,
'log': log, 'control': control, 'set_as_current': False,
'config_source': config_source}
def construct(self, cls, **kwargs):
return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
"""Rebuild app for versions 2.5+."""
return pickler()(cls, *args)
def _unpickle_app_v2(cls, kwargs):
"""Rebuild app for versions 3.1+."""
kwargs['set_as_current'] = False
return cls(**kwargs)
def filter_hidden_settings(conf):
"""Filter sensitive settings."""
def maybe_censor(key, value, mask='*' * 8):
if isinstance(value, Mapping):
return filter_hidden_settings(value)
if isinstance(key, str):
if HIDDEN_SETTINGS.search(key):
return mask
elif 'broker_url' in key.lower():
from kombu import Connection
return Connection(value).as_uri(mask=mask)
elif 'backend' in key.lower():
return maybe_sanitize_url(value, mask=mask)
return value
return {k: maybe_censor(k, v) for k, v in conf.items()}
def bugreport(app):
"""Return a string containing information useful in bug-reports."""
import billiard
import kombu
import celery
try:
conn = app.connection()
driver_v = '{}:{}'.format(conn.transport.driver_name,
conn.transport.driver_version())
transport = conn.transport_cls
except Exception: # pylint: disable=broad-except
transport = driver_v = ''
return BUGREPORT_INFO.format(
system=_platform.system(),
arch=', '.join(x for x in _platform.architecture() if x),
kernel_version=_platform.release(),
py_i=pyimplementation(),
celery_v=celery.VERSION_BANNER,
kombu_v=kombu.__version__,
billiard_v=billiard.__version__,
py_v=_platform.python_version(),
driver_v=driver_v,
transport=transport,
results=maybe_sanitize_url(app.conf.result_backend or 'disabled'),
human_settings=app.conf.humanize(),
loader=qualname(app.loader.__class__),
)
def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd):
"""Find app by name."""
from .base import Celery
try:
sym = symbol_by_name(app, imp=imp)
except AttributeError:
# last part was not an attribute, but a module
sym = imp(app)
if isinstance(sym, ModuleType) and ':' not in app:
try:
found = sym.app
if isinstance(found, ModuleType):
raise AttributeError()
except AttributeError:
try:
found = sym.celery
if isinstance(found, ModuleType):
raise AttributeError(
"attribute 'celery' is the celery module not the instance of celery")
except AttributeError:
if getattr(sym, '__path__', None):
try:
return find_app(
f'{app}.celery',
symbol_by_name=symbol_by_name, imp=imp,
)
except ImportError:
pass
for suspect in vars(sym).values():
if isinstance(suspect, Celery):
return suspect
raise
else:
return found
else:
return found
return sym
| AppPickler |
python | huggingface__transformers | src/transformers/models/efficientloftr/modular_efficientloftr.py | {
"start": 232,
"end": 3295
} | class ____(SuperGlueImageProcessorFast):
def post_process_keypoint_matching(
self,
outputs: "EfficientLoFTRKeypointMatchingOutput",
target_sizes: Union[TensorType, list[tuple]],
threshold: float = 0.0,
) -> list[dict[str, torch.Tensor]]:
"""
Converts the raw output of [`EfficientLoFTRKeypointMatchingOutput`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`EfficientLoFTRKeypointMatchingOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` or `List[Tuple[Tuple[int, int]]]`, *optional*):
Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`Tuple[int, int]`) containing the
target size `(height, width)` of each image in the batch. This must be the original image size (before
any processing).
threshold (`float`, *optional*, defaults to 0.0):
Threshold to filter out the matches with low scores.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
of the pair, the matching scores and the matching indices.
"""
if outputs.matches.shape[0] != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the mask")
if not all(len(target_size) == 2 for target_size in target_sizes):
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
if isinstance(target_sizes, list):
image_pair_sizes = torch.tensor(target_sizes, device=outputs.matches.device)
else:
if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
raise ValueError(
"Each element of target_sizes must contain the size (h, w) of each image of the batch"
)
image_pair_sizes = target_sizes
keypoints = outputs.keypoints.clone()
keypoints = keypoints * image_pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
keypoints = keypoints.to(torch.int32)
results = []
for keypoints_pair, matches, scores in zip(keypoints, outputs.matches, outputs.matching_scores):
# Filter out matches with low scores
valid_matches = torch.logical_and(scores > threshold, matches > -1)
matched_keypoints0 = keypoints_pair[0][valid_matches[0]]
matched_keypoints1 = keypoints_pair[1][valid_matches[1]]
matching_scores = scores[0][valid_matches[0]]
results.append(
{
"keypoints0": matched_keypoints0,
"keypoints1": matched_keypoints1,
"matching_scores": matching_scores,
}
)
return results
__all__ = ["EfficientLoFTRImageProcessorFast"]
| EfficientLoFTRImageProcessorFast |
python | walkccc__LeetCode | solutions/2092. Find All People With Secret/2092.py | {
"start": 0,
"end": 681
} | class ____:
def __init__(self, n: int):
self.id = list(range(n))
self.rank = [0] * n
def unionByRank(self, u: int, v: int) -> None:
i = self._find(u)
j = self._find(v)
if i == j:
return
if self.rank[i] < self.rank[j]:
self.id[i] = j
elif self.rank[i] > self.rank[j]:
self.id[j] = i
else:
self.id[i] = j
self.rank[j] += 1
def connected(self, u: int, v: int) -> bool:
return self._find(self.id[u]) == self._find(self.id[v])
def reset(self, u: int) -> None:
self.id[u] = u
def _find(self, u: int) -> int:
if self.id[u] != u:
self.id[u] = self._find(self.id[u])
return self.id[u]
| UnionFind |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 128881,
"end": 131792
} | class ____(torch.nn.Module):
"""An implementation of the speaker embedding model in a paper.
"ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in
TDNN Based Speaker Verification" (https://huggingface.co/papers/2005.07143).
"""
def __init__(self, config: Qwen2_5OmniDiTConfig):
super().__init__()
if len(config.enc_channels) != len(config.enc_kernel_sizes) or len(config.enc_channels) != len(
config.enc_dilations
):
raise ValueError("enc_channels, enc_kernel_sizes and enc_dilations should have same length")
self.channels = config.enc_channels
self.blocks = nn.ModuleList()
# The initial TDNN layer
self.blocks.append(
TimeDelayNetBlock(
config.mel_dim,
config.enc_channels[0],
config.enc_kernel_sizes[0],
config.enc_dilations[0],
)
)
# SE-Res2Net layers
for i in range(1, len(config.enc_channels) - 1):
self.blocks.append(
SqueezeExcitationRes2NetBlock(
config.enc_channels[i - 1],
config.enc_channels[i],
res2net_scale=config.enc_res2net_scale,
se_channels=config.enc_se_channels,
kernel_size=config.enc_kernel_sizes[i],
dilation=config.enc_dilations[i],
)
)
# Multi-layer feature aggregation
self.mfa = TimeDelayNetBlock(
config.enc_channels[-1],
config.enc_channels[-1],
config.enc_kernel_sizes[-1],
config.enc_dilations[-1],
)
# Attentive Statistical Pooling
self.asp = AttentiveStatisticsPooling(
config.enc_channels[-1],
attention_channels=config.enc_attention_channels,
)
# Final linear transformation
self.fc = nn.Conv1d(
in_channels=config.enc_channels[-1] * 2,
out_channels=config.enc_dim,
kernel_size=1,
padding="same",
padding_mode="reflect",
)
def forward(self, hidden_states):
# Minimize transpose for efficiency
hidden_states = hidden_states.transpose(1, 2)
hidden_states_list = []
for layer in self.blocks:
hidden_states = layer(hidden_states)
hidden_states_list.append(hidden_states)
# Multi-layer feature aggregation
hidden_states = torch.cat(hidden_states_list[1:], dim=1)
hidden_states = self.mfa(hidden_states)
# Attentive Statistical Pooling
hidden_states = self.asp(hidden_states)
# Final linear transformation
hidden_states = self.fc(hidden_states)
hidden_states = hidden_states.squeeze(-1)
return hidden_states
| ECAPA_TimeDelayNet |
python | django__django | django/template/library.py | {
"start": 10068,
"end": 10678
} | class ____(TagHelperNode):
child_nodelists = ()
def __init__(self, func, takes_context, args, kwargs, target_var):
super().__init__(func, takes_context, args, kwargs)
self.target_var = target_var
def render(self, context):
resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
output = self.func(*resolved_args, **resolved_kwargs)
if self.target_var is not None:
context[self.target_var] = output
return ""
if context.autoescape:
output = conditional_escape(output)
return output
| SimpleNode |
python | openai__openai-python | src/openai/types/chat/completion_create_params.py | {
"start": 15887,
"end": 16186
} | class ____(TypedDict, total=False):
approximate: Required[WebSearchOptionsUserLocationApproximate]
"""Approximate location parameters for the search."""
type: Required[Literal["approximate"]]
"""The type of location approximation. Always `approximate`."""
| WebSearchOptionsUserLocation |
python | walkccc__LeetCode | solutions/2927. Distribute Candies Among Children III/2927.py | {
"start": 0,
"end": 803
} | class ____:
def distributeCandies(self, n: int, limit: int) -> int:
def ways(n: int) -> int:
"""Returns the number of ways to distribute n candies to 3 children."""
if n < 0:
return 0
# Stars and bars method:
# e.g. '**|**|*' means to distribute 5 candies to 3 children, where
# stars (*) := candies and bars (|) := dividers between children.
return math.comb(n + 2, 2)
limitPlusOne = limit + 1
oneChildExceedsLimit = ways(n - limitPlusOne)
twoChildrenExceedLimit = ways(n - 2 * limitPlusOne)
threeChildrenExceedLimit = ways(n - 3 * limitPlusOne)
# Principle of Inclusion-Exclusion (PIE)
return (ways(n)
- 3 * oneChildExceedsLimit
+ 3 * twoChildrenExceedLimit
- threeChildrenExceedLimit)
| Solution |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/bricks.py | {
"start": 2900,
"end": 7542
} | class ____(queue.Queue):
"""Thread-safe implementation of an ordered set queue.
Disallows adding a duplicate item while maintaining the
order of items in the queue. The implementation leverages
locking already implemented in the base class
redefining only the primitives. Since the internal queue
is not replaced, the order is maintained. The set is used
merely to check for the existence of an item.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
:author: lalinsky@gmail.com (Lukáš Lalinský)
:url: http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
"""
def _init(self, maxsize):
queue.Queue._init(self, maxsize)
self._set_of_items = set()
def _put(self, item):
if item not in self._set_of_items:
queue.Queue._put(self, item)
self._set_of_items.add(item)
else:
# `put` increments `unfinished_tasks` even if we did not put
# anything into the queue here
self.unfinished_tasks -= 1
def _get(self):
item = queue.Queue._get(self)
self._set_of_items.remove(item)
return item
if sys.version_info >= (2, 6, 0):
KEY, PREV, NEXT = list(range(3))
class OrderedSet(MutableSet):
"""
Implementation based on a doubly-linked link and an internal dictionary.
This design gives :class:`OrderedSet` the same big-Oh running times as
regular sets including O(1) adds, removes, and lookups as well as
O(n) iteration.
.. ADMONITION:: Implementation notes
Runs on Python 2.6 or later (and runs on Python 3.0 or later
without any modifications).
:author: python@rcn.com (Raymond Hettinger)
:url: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, _next = self.map.pop(key)
prev[NEXT] = _next
_next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
| OrderedSetQueue |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_reflection.py | {
"start": 12118,
"end": 18054
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__only_on__ = "oracle"
__sparse_driver_backend__ = True
@testing.fixture
def plain_foo_table(self, metadata, connection):
foo = Table("foo", metadata, Column("id", Integer, primary_key=True))
foo.create(connection)
return foo
def test_oracle_has_no_on_update_cascade(
self, metadata, connection, plain_foo_table
):
bar = Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column(
"foo_id", Integer, ForeignKey("foo.id", onupdate="CASCADE")
),
)
assert_warns(exc.SAWarning, bar.create, connection)
bat = Table(
"bat",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer),
ForeignKeyConstraint(["foo_id"], ["foo.id"], onupdate="CASCADE"),
)
assert_warns(exc.SAWarning, bat.create, connection)
def test_reflect_check_include_all(
self, metadata, connection, plain_foo_table
):
insp = inspect(connection)
eq_(insp.get_check_constraints("foo"), [])
eq_(
[
rec["sqltext"]
for rec in insp.get_check_constraints("foo", include_all=True)
],
['"ID" IS NOT NULL'],
)
@testing.fixture
def invisible_fk_fixture(self, metadata, connection):
Table("table_b", metadata, Column("id", Integer, primary_key=True))
Table(
"table_a",
metadata,
Column("id", Integer, primary_key=True),
Column("a_col1", Integer),
)
metadata.create_all(connection)
connection.exec_driver_sql(
"alter table table_a modify (a_col1 invisible)"
)
connection.exec_driver_sql(
"alter table table_a add constraint FK_table_a_a_col1 "
"foreign key(a_col1) references table_b"
)
@testing.fixture
def invisible_index_fixture(self, metadata, connection):
Table(
"table_a",
metadata,
Column("id", Integer, primary_key=True),
Column("a_col1", Integer),
Index("idx_col1", "a_col1"),
)
metadata.create_all(connection)
connection.exec_driver_sql(
"alter table table_a modify (a_col1 invisible)"
)
@testing.fixture
def invisible_uq_fixture(self, metadata, connection):
Table(
"table_a",
metadata,
Column("id", Integer, primary_key=True),
Column("a_col1", Integer),
UniqueConstraint("a_col1", name="uq_col1"),
)
metadata.create_all(connection)
connection.exec_driver_sql(
"alter table table_a modify (a_col1 invisible)"
)
@testing.fixture
def invisible_pk_fixture(self, metadata, connection):
Table(
"table_a",
metadata,
Column("id", Integer, primary_key=True),
Column("a_col1", Integer),
)
Table(
"table_b",
metadata,
Column("comp_id1", Integer, primary_key=True),
Column("comp_id2", Integer, primary_key=True),
Column("a_col1", Integer),
)
metadata.create_all(connection)
connection.exec_driver_sql("alter table table_a modify (id invisible)")
connection.exec_driver_sql(
"alter table table_b modify (comp_id2 invisible)"
)
def test_no_resolve_fks_w_invisible(
self, connection, invisible_fk_fixture
):
metadata_reflect = MetaData()
with expect_warnings(
r"On reflected table table_a, skipping reflection of foreign key "
r"constraint fk_table_a_a_col1; one or more subject columns "
r"within name\(s\) a_col1 are not present in the table"
):
metadata_reflect.reflect(connection)
ta = metadata_reflect.tables["table_a"]
tb = metadata_reflect.tables["table_b"]
self.assert_compile(
select(ta, tb),
"SELECT table_a.id, table_b.id AS id_1 FROM table_a, table_b",
)
def test_no_resolve_idx_w_invisible(
self, connection, invisible_index_fixture
):
metadata_reflect = MetaData()
with expect_warnings(
r"index key 'a_col1' was not located in columns "
r"for table 'table_a'"
):
metadata_reflect.reflect(connection)
ta = metadata_reflect.tables["table_a"]
self.assert_compile(
select(ta),
"SELECT table_a.id FROM table_a",
)
def test_no_resolve_uq_w_invisible(self, connection, invisible_uq_fixture):
metadata_reflect = MetaData()
with expect_warnings(
r"index key 'a_col1' was not located in columns "
r"for table 'table_a'"
):
metadata_reflect.reflect(connection)
ta = metadata_reflect.tables["table_a"]
self.assert_compile(
select(ta),
"SELECT table_a.id FROM table_a",
)
def test_no_resolve_pk_w_invisible(self, connection, invisible_pk_fixture):
metadata_reflect = MetaData()
metadata_reflect.reflect(connection)
# single col pk fully invisible
ta = metadata_reflect.tables["table_a"]
eq_(list(ta.primary_key), [])
self.assert_compile(
select(ta),
"SELECT table_a.a_col1 FROM table_a",
)
# composite pk one col invisible
tb = metadata_reflect.tables["table_b"]
eq_(list(tb.primary_key), [tb.c.comp_id1])
self.assert_compile(
select(tb),
"SELECT table_b.comp_id1, table_b.a_col1 FROM table_b",
)
| ConstraintTest |
python | scipy__scipy | scipy/integrate/_quadpack_py.py | {
"start": 52820,
"end": 54603
} | class ____:
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of N-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
| _NQuad |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/client.py | {
"start": 1815,
"end": 2854
} | class ____(TokenStorage):
"""
Simple in-memory token storage implementation for OAuth authentication.
This is the default storage used when none is provided to with_oauth().
Not suitable for production use across restarts as tokens are only stored
in memory.
"""
def __init__(self):
self._tokens: Optional[OAuthToken] = None
self._client_info: Optional[OAuthClientInformationFull] = None
async def get_tokens(self) -> Optional[OAuthToken]:
"""Get the stored OAuth tokens."""
return self._tokens
async def set_tokens(self, tokens: OAuthToken) -> None:
"""Store OAuth tokens."""
self._tokens = tokens
async def get_client_info(self) -> Optional[OAuthClientInformationFull]:
"""Get the stored client information."""
return self._client_info
async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
"""Store client information."""
self._client_info = client_info
| DefaultInMemoryTokenStorage |
python | huggingface__transformers | src/transformers/models/markuplm/modeling_markuplm.py | {
"start": 17069,
"end": 18560
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = MarkupLMAttention(config)
self.intermediate = MarkupLMIntermediate(config)
self.output = MarkupLMOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.align.modeling_align.AlignTextEncoder with AlignText->MarkupLM
| MarkupLMLayer |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 13305,
"end": 15433
} | class ____(Benchmark):
# though there is a new version of this benchmark that runs all the
# distributions, at the time of writing there was odd behavior on
# the asv for this benchmark, so it is retained.
# https://pv.github.io/scipy-bench/#stats.Distribution.time_distribution
param_names = ['distribution', 'properties']
params = [
['cauchy', 'gamma', 'beta'],
['pdf', 'cdf', 'rvs', 'fit']
]
def setup(self, distribution, properties):
rng = np.random.default_rng(12345678)
self.x = rng.random(100)
def time_distribution(self, distribution, properties):
if distribution == 'gamma':
if properties == 'pdf':
stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
elif properties == 'cdf':
stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
elif properties == 'rvs':
stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
elif properties == 'fit':
stats.gamma.fit(self.x, loc=4, scale=10)
elif distribution == 'cauchy':
if properties == 'pdf':
stats.cauchy.pdf(self.x, loc=4, scale=10)
elif properties == 'cdf':
stats.cauchy.cdf(self.x, loc=4, scale=10)
elif properties == 'rvs':
stats.cauchy.rvs(size=1000, loc=4, scale=10)
elif properties == 'fit':
stats.cauchy.fit(self.x, loc=4, scale=10)
elif distribution == 'beta':
if properties == 'pdf':
stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'cdf':
stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'rvs':
stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
elif properties == 'fit':
stats.beta.fit(self.x, loc=4, scale=10)
# Retain old benchmark results (remove this if changing the benchmark)
time_distribution.version = (
"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
)
| Distribution |
python | readthedocs__readthedocs.org | readthedocs/core/mixins.py | {
"start": 669,
"end": 724
} | class ____(LoginRequiredMixin):
pass
| PrivateViewMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.