language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/3483. Unique 3-Digit Even Numbers/3483.py | {
"start": 0,
"end": 233
} | class ____:
def totalNumbers(self, digits: list[int]) -> int:
nums = set()
for a, b, c in itertools.permutations(digits, 3):
if a != 0 and c % 2 == 0:
nums.add(a * 100 + b * 10 + c)
return len(nums)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/utilities.py | {
"start": 4973,
"end": 6568
} | class ____(BaseModel):
"""
Utility class to call GCP `executions` API and
interact with the returned objects.
"""
name: str
namespace: str
metadata: dict
spec: dict
status: dict
log_uri: str
def is_running(self) -> bool:
"""Returns True if Execution is not completed."""
return self.status.get("completionTime") is None
def condition_after_completion(self):
"""Returns Execution condition if Execution has completed."""
for condition in self.status["conditions"]:
if condition["type"] == "Completed":
return condition
def succeeded(self):
"""Whether or not the Execution completed is a successful state."""
completed_condition = self.condition_after_completion()
if completed_condition and completed_condition["status"] == "True":
return True
return False
@classmethod
def get(cls, client: Resource, namespace: str, execution_name: str):
"""
Make a get request to the GCP executions API
and return an Execution instance.
"""
request = client.executions().get(
name=f"namespaces/{namespace}/executions/{execution_name}"
)
response = request.execute()
return cls(
name=response["metadata"]["name"],
namespace=response["metadata"]["namespace"],
metadata=response["metadata"],
spec=response["spec"],
status=response["status"],
log_uri=response["status"]["logUri"],
)
| Execution |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 1346,
"end": 1563
} | class ____(DataStoreEvent):
"""
Signals that a task was updated in a data store.
:ivar task_id: ID of the task that was updated
"""
task_id: str
@attrs.define(kw_only=True, frozen=True)
| TaskUpdated |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1166979,
"end": 1170359
} | class ____(sgqlc.types.Type, Node):
"""A user account on an Enterprise Server installation."""
__schema__ = github_schema
__field_names__ = (
"created_at",
"emails",
"enterprise_server_installation",
"is_site_admin",
"login",
"profile_name",
"remote_created_at",
"remote_user_id",
"updated_at",
)
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
emails = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseServerUserAccountEmailConnection),
graphql_name="emails",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(
EnterpriseServerUserAccountEmailOrder, graphql_name="orderBy", default={"field": "EMAIL", "direction": "ASC"}
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""User emails belonging to this user account.
Arguments:
* `order_by` (`EnterpriseServerUserAccountEmailOrder`): Ordering
options for Enterprise Server user account emails returned from
the connection. (default: `{field: EMAIL, direction: ASC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
enterprise_server_installation = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseServerInstallation), graphql_name="enterpriseServerInstallation"
)
"""The Enterprise Server installation on which this user account
exists.
"""
is_site_admin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isSiteAdmin")
"""Whether the user account is a site administrator on the Enterprise
Server installation.
"""
login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
"""The login of the user account on the Enterprise Server
installation.
"""
profile_name = sgqlc.types.Field(String, graphql_name="profileName")
"""The profile name of the user account on the Enterprise Server
installation.
"""
remote_created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="remoteCreatedAt")
"""The date and time when the user account was created on the
Enterprise Server installation.
"""
remote_user_id = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="remoteUserId")
"""The ID of the user account on the Enterprise Server installation."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| EnterpriseServerUserAccount |
python | numba__numba | numba/cuda/tests/cudapy/test_serialize.py | {
"start": 264,
"end": 2321
} | class ____(CUDATestCase):
def check_call(self, callee):
arr = np.array([100])
expected = callee[1, 1](arr)
# serialize and rebuild
foo1 = pickle.loads(pickle.dumps(callee))
del callee
# call rebuild function
got1 = foo1[1, 1](arr)
np.testing.assert_equal(got1, expected)
del got1
# test serialization of previously serialized object
foo2 = pickle.loads(pickle.dumps(foo1))
del foo1
# call rebuild function
got2 = foo2[1, 1](arr)
np.testing.assert_equal(got2, expected)
del got2
# test propagation of thread, block config
foo3 = pickle.loads(pickle.dumps(foo2[5, 8]))
del foo2
self.assertEqual(foo3.griddim, (5, 1, 1))
self.assertEqual(foo3.blockdim, (8, 1, 1))
def test_pickling_jit_typing(self):
@cuda.jit(device=True)
def inner(a):
return a + 1
@cuda.jit('void(intp[:])')
def foo(arr):
arr[0] = inner(arr[0])
self.check_call(foo)
def test_pickling_jit(self):
@cuda.jit(device=True)
def inner(a):
return a + 1
@cuda.jit
def foo(arr):
arr[0] = inner(arr[0])
self.check_call(foo)
def test_pickling_vectorize(self):
@vectorize(['intp(intp)', 'float64(float64)'], target='cuda')
def cuda_vect(x):
return x * 2
# accommodate int representations in np.arange
npty = numpy_support.as_dtype(types.intp)
# get expected result
ary = np.arange(10, dtype=npty)
expected = cuda_vect(ary)
# first pickle
foo1 = pickle.loads(pickle.dumps(cuda_vect))
del cuda_vect
got1 = foo1(ary)
np.testing.assert_equal(expected, got1)
# second pickle
foo2 = pickle.loads(pickle.dumps(foo1))
del foo1
got2 = foo2(ary)
np.testing.assert_equal(expected, got2)
if __name__ == '__main__':
unittest.main()
| TestPickle |
python | google__flatbuffers | tests/optional_scalars/OptionalByte.py | {
"start": 101,
"end": 167
} | class ____(object):
None_ = 0
One = 1
Two = 2
| OptionalByte |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 6174,
"end": 6332
} | class ____(serializers.ModelSerializer):
class Meta:
model = NullUniquenessTogetherModel
fields = '__all__'
| NullUniquenessTogetherSerializer |
python | kubernetes-client__python | kubernetes/client/models/v1_ephemeral_volume_source.py | {
"start": 383,
"end": 3778
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'volume_claim_template': 'V1PersistentVolumeClaimTemplate'
}
attribute_map = {
'volume_claim_template': 'volumeClaimTemplate'
}
def __init__(self, volume_claim_template=None, local_vars_configuration=None): # noqa: E501
"""V1EphemeralVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._volume_claim_template = None
self.discriminator = None
if volume_claim_template is not None:
self.volume_claim_template = volume_claim_template
@property
def volume_claim_template(self):
"""Gets the volume_claim_template of this V1EphemeralVolumeSource. # noqa: E501
:return: The volume_claim_template of this V1EphemeralVolumeSource. # noqa: E501
:rtype: V1PersistentVolumeClaimTemplate
"""
return self._volume_claim_template
@volume_claim_template.setter
def volume_claim_template(self, volume_claim_template):
"""Sets the volume_claim_template of this V1EphemeralVolumeSource.
:param volume_claim_template: The volume_claim_template of this V1EphemeralVolumeSource. # noqa: E501
:type: V1PersistentVolumeClaimTemplate
"""
self._volume_claim_template = volume_claim_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EphemeralVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EphemeralVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1EphemeralVolumeSource |
python | google__jax | jax/experimental/jax2tf/examples/mnist_lib.py | {
"start": 3161,
"end": 6576
} | class ____:
"""An MNIST model written using pure JAX.
There is an option for the model to skip the classifier layer, for
demonstrating reuse of the classifier-less model into a larger model.
See README.md.
"""
name = "mnist_pure_jax"
@staticmethod
def predict(params: Sequence[tuple[Any, Any]], inputs, with_classifier=True):
"""The prediction function.
Args:
params: a list with pairs of weights and biases for each layer.
inputs: the batch of images (B, 28, 28, 1)
with_classifier: whether to include the classifier layer.
Returns:
either the predictions (B, 10) if with_classifier=True, or the
final set of logits of shape (B, 512).
"""
x = inputs.reshape((inputs.shape[0], -1)) # flatten to f32[B, 784]
for w, b in params[:-1]:
x = jnp.dot(x, w) + b
x = jnp.tanh(x)
if not with_classifier:
return x
final_w, final_b = params[-1]
logits = jnp.dot(x, final_w) + final_b
return logits - jax.scipy.special.logsumexp(
logits, axis=1, keepdims=True)
@staticmethod
def loss(params, inputs, labels):
predictions = PureJaxMNIST.predict(params, inputs, with_classifier=True)
return -jnp.mean(jnp.sum(predictions * labels, axis=1))
@staticmethod
def accuracy(predict: Callable, params, dataset):
@jax.jit
def _per_batch(inputs, labels):
target_class = jnp.argmax(labels, axis=1)
predicted_class = jnp.argmax(predict(params, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
batched = [
_per_batch(inputs, labels) for inputs, labels in tfds.as_numpy(dataset)
]
return jnp.mean(jnp.stack(batched))
@staticmethod
def update(params, inputs, labels):
grads = jax.grad(PureJaxMNIST.loss)(params, inputs, labels)
return [(w - step_size * dw, b - step_size * db)
for (w, b), (dw, db) in zip(params, grads)]
@staticmethod
def train(train_ds, test_ds, num_epochs, with_classifier=True):
"""Trains a pure JAX MNIST predictor.
Returns:
a tuple with two elements:
- a predictor function with signature "(Params, ImagesBatch) ->
Predictions".
If `with_classifier=False` then the output of the predictor function
is the last layer of logits.
- the parameters "Params" for the predictor function
"""
rng = jax.random.PRNGKey(0)
params = [(param_scale * jax.random.normal(rng, (m, n)),
param_scale * jax.random.normal(rng, (n,)))
for m, n, in zip(layer_sizes[:-1], layer_sizes[1:])]
for epoch in range(num_epochs):
start_time = time.time()
for inputs, labels in tfds.as_numpy(train_ds):
params = jax.jit(PureJaxMNIST.update)(params, inputs, labels)
epoch_time = time.time() - start_time
train_acc = PureJaxMNIST.accuracy(PureJaxMNIST.predict, params, train_ds)
test_acc = PureJaxMNIST.accuracy(PureJaxMNIST.predict, params, test_ds)
logging.info("%s: Epoch %d in %0.2f sec", PureJaxMNIST.name, epoch,
epoch_time)
logging.info("%s: Training set accuracy %0.2f%%", PureJaxMNIST.name,
100. * train_acc)
logging.info("%s: Test set accuracy %0.2f%%", PureJaxMNIST.name,
100. * test_acc)
return (functools.partial(
PureJaxMNIST.predict, with_classifier=with_classifier), params)
| PureJaxMNIST |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/data_version.py | {
"start": 12216,
"end": 31984
} | class ____:
"""Used to resolve data version information. Avoids redundant database
calls that would otherwise occur. Intended for use within the scope of a
single "request" (e.g. GQL request, RunRequest resolution).
"""
_instance: "DagsterInstance"
_instance_queryer: Optional["CachingInstanceQueryer"]
_asset_graph: Optional["BaseAssetGraph"]
_asset_graph_load_fn: Optional[Callable[[], "BaseAssetGraph"]]
def __init__(
self,
instance: "DagsterInstance",
asset_graph: Union["BaseAssetGraph", Callable[[], "BaseAssetGraph"]],
loading_context: LoadingContext,
instance_queryer: Optional["CachingInstanceQueryer"] = None,
):
from dagster._core.definitions.assets.graph.base_asset_graph import BaseAssetGraph
self._instance = instance
self._instance_queryer = instance_queryer
self._loading_context = loading_context
self._partition_loading_context = PartitionLoadingContext(
temporal_context=TemporalContext(
effective_dt=get_current_datetime(), last_event_id=None
),
dynamic_partitions_store=None,
).updated(
effective_dt=self._instance_queryer.evaluation_time if self._instance_queryer else None,
dynamic_partitions_store=self._instance_queryer or self._instance,
)
if isinstance(asset_graph, BaseAssetGraph):
self._asset_graph = asset_graph
self._asset_graph_load_fn = None
else:
self._asset_graph = None
self._asset_graph_load_fn = asset_graph
@use_partition_loading_context
def get_status(self, key: "AssetKey", partition_key: Optional[str] = None) -> StaleStatus:
from dagster._core.definitions.events import AssetKeyPartitionKey
return self._get_status(key=AssetKeyPartitionKey(key, partition_key))
@use_partition_loading_context
def get_stale_causes(
self, key: "AssetKey", partition_key: Optional[str] = None
) -> Sequence[StaleCause]:
from dagster._core.definitions.events import AssetKeyPartitionKey
return self._get_stale_causes(key=AssetKeyPartitionKey(key, partition_key))
@use_partition_loading_context
def get_stale_root_causes(
self, key: "AssetKey", partition_key: Optional[str] = None
) -> Sequence[StaleCause]:
from dagster._core.definitions.events import AssetKeyPartitionKey
return self._get_stale_root_causes(key=AssetKeyPartitionKey(key, partition_key))
@use_partition_loading_context
def get_current_data_version(
self, key: "AssetKey", partition_key: Optional[str] = None
) -> DataVersion:
from dagster._core.definitions.events import AssetKeyPartitionKey
return self._get_current_data_version(key=AssetKeyPartitionKey(key, partition_key))
@cached_method
def _get_status(self, key: "AssetKeyPartitionKey") -> StaleStatus:
# The status loader does not support querying for the stale status of a
# partitioned asset without specifying a partition, so we return here.
asset = self.asset_graph.get(key.asset_key)
if asset.is_partitioned and not key.partition_key:
return StaleStatus.FRESH
else:
current_version = self._get_current_data_version(key=key)
if current_version == NULL_DATA_VERSION:
return StaleStatus.MISSING
elif asset.is_external:
return StaleStatus.FRESH
else:
causes = self._get_stale_causes(key=key)
return StaleStatus.FRESH if len(causes) == 0 else StaleStatus.STALE
@cached_method
def _get_stale_causes(self, key: "AssetKeyPartitionKey") -> Sequence[StaleCause]:
# Querying for the stale status of a partitioned asset without specifying a partition key
# is strictly speaking undefined, but we return an empty list here (from which FRESH status
# is inferred) for backcompat.
asset = self.asset_graph.get(key.asset_key)
if asset.is_partitioned and not key.partition_key:
return []
elif asset.is_external:
return []
else:
current_version = self._get_current_data_version(key=key)
if current_version == NULL_DATA_VERSION:
return []
else:
return sorted(
self._get_stale_causes_materialized(key=key), key=lambda cause: cause.sort_key
)
def _is_dep_updated(self, provenance: DataProvenance, dep_key: "AssetKeyPartitionKey") -> bool:
dep_asset = self.asset_graph.get(dep_key.asset_key)
if dep_key.partition_key is None:
current_data_version = self._get_current_data_version(key=dep_key)
return self._data_versions_differ(
provenance.input_data_versions[dep_key.asset_key], current_data_version
)
else:
cursor = provenance.input_storage_ids[dep_key.asset_key]
updated_record = self._instance.get_latest_data_version_record(
dep_key.asset_key,
dep_asset.is_external,
dep_key.partition_key,
after_cursor=cursor,
)
if updated_record:
previous_record = self._instance.get_latest_data_version_record(
dep_key.asset_key,
dep_asset.is_external,
dep_key.partition_key,
before_cursor=cursor + 1 if cursor else None,
)
previous_version = (
extract_data_version_from_entry(previous_record.event_log_entry)
if previous_record
else None
)
updated_version = extract_data_version_from_entry(updated_record.event_log_entry)
return self._data_versions_differ(previous_version, updated_version)
else:
return False
def _data_versions_differ(
self, prev_data_version: Optional[DataVersion], curr_data_version: Optional[DataVersion]
) -> bool:
# We special case this to handle a complex niche scenario:
#
# - Data version system uses
# - INITIAL for assets that are assumed to have a value despite no recorded data version
# (external assets are always assumed to have a value)
# - NULL for assets that are assumed to have no value (dagster-managed assets that have
# never been materialized)
# - Suppose we have an asset X and two code locs, A and B. Asset X is defined as a
# materializable asset in A and external asset in B.
# - Because there is no record for the asset, the framework generates a default data version.
# - In A, this will be NULL (because it is a dagster-managed asset with no materialization)
# - In B, this will be INITIAL (because it is an external asset assumed to have a value)
# - This discrepancy is fine until you introduce another asset Y that depends on X in code
# location B. When you materialize Y (in code loc B), it will save X's data version as
# INITIAL in the data provenance
# - Now when we compare the version of X on Y provenance (INITIAL) to the current version in
# this code (NULL), they will look different, though X was never changed.
#
# To mitigate this, we treat the case where the previous data version is
# DEFAULT_DATA_VERSION and current data version is NULL_DATA_VERSION as unchanged.
if prev_data_version == DEFAULT_DATA_VERSION and curr_data_version == NULL_DATA_VERSION:
return False
else:
return prev_data_version != curr_data_version
def _get_stale_causes_materialized(self, key: "AssetKeyPartitionKey") -> Iterator[StaleCause]:
from dagster._core.definitions.events import AssetKeyPartitionKey
code_version = self.asset_graph.get(key.asset_key).code_version
provenance = self._get_current_data_provenance(key=key)
asset_deps = self.asset_graph.get(key.asset_key).parent_keys
# only used if no provenance available
materialization = check.not_none(self._get_latest_data_version_record(key=key))
materialization_time = materialization.timestamp
if provenance:
if code_version and code_version != provenance.code_version:
yield StaleCause(key, StaleCauseCategory.CODE, "has a new code version")
removed_deps = set(provenance.input_data_versions.keys()) - set(asset_deps)
for dep_key in removed_deps:
yield StaleCause(
key,
StaleCauseCategory.DEPENDENCIES,
f"removed dependency on {dep_key.to_user_string()}",
AssetKeyPartitionKey(dep_key, None),
)
# If a partition has greater than or equal to SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD of
# dependencies, it is not included in partition_deps. This is for performance reasons. This
# constraint can be removed when we have thoroughly tested performance for large upstream
# partition counts.
partition_deps = self._get_partition_dependencies(key=key)
for dep_key in sorted(partition_deps):
dep_asset = self.asset_graph.get(dep_key.asset_key)
if provenance:
if not provenance.has_input_asset(dep_key.asset_key):
yield StaleCause(
key,
StaleCauseCategory.DEPENDENCIES,
f"has a new dependency on {dep_key.asset_key.to_user_string()}",
dep_key,
)
elif self._is_dep_updated(provenance, dep_key):
report_data_version = (
dep_asset.code_version is not None
or self._is_current_data_version_user_provided(key=dep_key)
)
yield StaleCause(
key,
StaleCauseCategory.DATA,
(
"has a new dependency data version"
if report_data_version
else "has a new dependency materialization"
),
dep_key,
[
StaleCause(
dep_key,
StaleCauseCategory.DATA,
(
# Assets with no defined code version will get a new data version on
# every materialization, so we just report new materialization for
# this case since the user likely hasn't engaged with data versions.
"has a new data version"
if report_data_version
else "has a new materialization"
),
)
],
)
# If no provenance and dep is a materializable asset, then use materialization
# timestamps instead of versions this should be removable eventually since
# provenance is on all newer materializations. If dep is a source, then we'll never
# provide a stale reason here.
elif not dep_asset.is_external:
dep_materialization = self._get_latest_data_version_record(key=dep_key)
if dep_materialization is None:
# The input must be new if it has no materialization
yield StaleCause(key, StaleCauseCategory.DATA, "has a new input", dep_key)
elif dep_materialization.timestamp > materialization_time:
yield StaleCause(
key,
StaleCauseCategory.DATA,
"has a new dependency materialization",
dep_key,
[
StaleCause(
dep_key,
StaleCauseCategory.DATA,
"has a new materialization",
)
],
)
@cached_method
def _get_stale_root_causes(self, key: "AssetKeyPartitionKey") -> Sequence[StaleCause]:
candidates = self._get_stale_causes(key=key)
visited = set()
root_causes = []
while candidates:
next_candidates: list[StaleCause] = []
for cause in candidates:
if cause.dedupe_key not in visited:
if cause.children is None:
root_causes.append(cause)
else:
next_candidates.extend(cause.children)
visited.add(cause.dedupe_key)
candidates = next_candidates
return root_causes
@property
def asset_graph(self) -> "BaseAssetGraph":
if self._asset_graph is None:
self._asset_graph = check.not_none(self._asset_graph_load_fn)()
return self._asset_graph
# This is lazily constructed because it depends on the asset graph, which needs to be lazily
# constructed for GQL performance reasons.
@property
def instance_queryer(self) -> "CachingInstanceQueryer":
from dagster._utils.caching_instance_queryer import CachingInstanceQueryer
if self._instance_queryer is None:
self._instance_queryer = CachingInstanceQueryer(
self._instance, self.asset_graph, self._loading_context
)
return self._instance_queryer
@cached_method
def _get_current_data_version(self, *, key: "AssetKeyPartitionKey") -> DataVersion:
# Currently we can only use asset records, which are fetched in one shot, for non-source
# assets. This is because the most recent AssetObservation is not stored on the AssetRecord.
record = self._get_latest_data_version_record(key=key)
if self.asset_graph.get(key.asset_key).is_external and record is None:
return DEFAULT_DATA_VERSION
elif record is None:
return NULL_DATA_VERSION
else:
data_version = extract_data_version_from_entry(record.event_log_entry)
return data_version or DEFAULT_DATA_VERSION
@cached_method
def _is_current_data_version_user_provided(self, *, key: "AssetKeyPartitionKey") -> bool:
if self.asset_graph.get(key.asset_key).is_external:
return True
else:
provenance = self._get_current_data_provenance(key=key)
return provenance is not None and provenance.is_user_provided
@cached_method
def _get_current_data_provenance(
self, *, key: "AssetKeyPartitionKey"
) -> Optional[DataProvenance]:
record = self._get_latest_data_version_record(key=key)
if record is None:
return None
else:
return extract_data_provenance_from_entry(record.event_log_entry)
# Volatility means that an asset is assumed to be constantly changing. We assume that observable
# source assets are non-volatile, since the primary purpose of the observation function is to
# determine if a source asset has changed. We assume that regular assets are volatile if they
# are at the root of the graph (have no dependencies) or are downstream of a volatile asset.
@cached_method
def _is_volatile(self, *, key: "AssetKey") -> bool:
asset = self.asset_graph.get(key)
if asset.is_external:
return asset.is_observable
else:
deps = asset.get(key).parent_keys
return len(deps) == 0 or any(self._is_volatile(key=dep_key) for dep_key in deps)
@cached_method
def _get_latest_data_version_event(
self, *, key: "AssetKeyPartitionKey"
) -> Optional[Union["AssetMaterialization", "AssetObservation"]]:
record = self._get_latest_data_version_record(key=key)
if record:
entry = record.event_log_entry
return entry.asset_materialization or entry.asset_observation
else:
return None
@cached_method
def _get_latest_data_version_record(
self, key: "AssetKeyPartitionKey"
) -> Optional["EventLogRecord"]:
return self.instance_queryer.get_latest_materialization_or_observation_record(
asset_partition=key
)
# If a partition has greater than or equal to SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD
# of dependencies, or is downstream of a time window partition with an AllPartitionsMapping,
# it is not included in partition_deps. This is for performance reasons. Besides this, the
# logic here largely replicates `asset_graph.get_parents_partitions`.
#
# Similarly, If an asset is self-dependent and has greater than or equal to
# SKIP_PARTITION_DATA_VERSION_SELF_DEPENDENCY_THRESHOLD partitions, we don't check the
# self-edge for updated data or propagate other stale causes through the edge. That is because
# the current logic will recurse to the first partition, potentially throwing a recursion error.
@cached_method
def _get_partition_dependencies(
self, *, key: "AssetKeyPartitionKey"
) -> Sequence["AssetKeyPartitionKey"]:
from dagster._core.definitions.events import AssetKeyPartitionKey
from dagster._core.definitions.partitions.definition import TimeWindowPartitionsDefinition
from dagster._core.definitions.partitions.mapping import AllPartitionMapping
asset_deps = self.asset_graph.get(key.asset_key).parent_keys
deps = []
for dep_asset_key in asset_deps:
dep_asset = self.asset_graph.get(dep_asset_key)
if not dep_asset.is_partitioned:
deps.append(AssetKeyPartitionKey(dep_asset_key, None))
elif key.asset_key == dep_asset_key and self._exceeds_self_partition_limit(
key.asset_key
):
continue
elif isinstance(
dep_asset.partitions_def, TimeWindowPartitionsDefinition
) and isinstance(
self.asset_graph.get_partition_mapping(key.asset_key, dep_asset_key),
AllPartitionMapping,
):
continue
else:
upstream_partition_keys = list(
self.asset_graph.get_parent_partition_keys_for_child(
key.partition_key, dep_asset_key, key.asset_key
).partitions_subset.get_partition_keys()
)
if len(upstream_partition_keys) < SKIP_PARTITION_DATA_VERSION_DEPENDENCY_THRESHOLD:
deps.extend(
[
AssetKeyPartitionKey(dep_asset_key, partition_key)
for partition_key in upstream_partition_keys
]
)
return deps
def _exceeds_self_partition_limit(self, asset_key: "AssetKey") -> bool:
return (
check.not_none(self.asset_graph.get(asset_key).partitions_def).get_num_partitions()
>= SKIP_PARTITION_DATA_VERSION_SELF_DEPENDENCY_THRESHOLD
)
| CachingStaleStatusResolver |
python | google__pytype | pytype_extensions/instrumentation_for_testing_test.py | {
"start": 1666,
"end": 1860
} | class ____:
def __init__(self):
self.state = 3
def Mul100(self, i):
return self.state * i * 102
def ProductionCodePassNoCtor(obj: NoCtor):
return obj.Mul100(2)
| FakeNoCtorSealedAs |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 696867,
"end": 697369
} | class ____(sgqlc.types.Type, Node, Actor, UniformResourceLocatable):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("created_at", "database_id", "updated_at")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
| Bot |
python | getsentry__sentry | src/sentry/unmerge.py | {
"start": 619,
"end": 2867
} | class ____(abc.ABC):
"""
A type defining how and by which criteria a subset of events can be
moved out of a group into a new, different group.
"""
@staticmethod
def parse_arguments(fingerprints: Any = None, replacement: Any = None) -> "UnmergeReplacement":
if replacement is not None:
if isinstance(replacement, dict):
replacement = _REPLACEMENT_TYPE_LABELS.get_key(replacement.pop("type"))(
**replacement
)
assert isinstance(replacement, UnmergeReplacement)
return replacement
elif fingerprints is not None:
# TODO(markus): Deprecate once we no longer use `fingerprints` arg
# (need to change group_hashes endpoint first)
return PrimaryHashUnmergeReplacement(fingerprints=fingerprints)
else:
raise TypeError("Either fingerprints or replacement argument is required.")
@abc.abstractmethod
def get_unmerge_key(
self, event: GroupEvent, locked_primary_hashes: Collection[str]
) -> str | None:
"""
The unmerge task iterates through all events of a group. This function
should return which of them should land in the new group.
If the event should be moved, a string should be returned. Events with
the same string are moved into the same issue.
"""
raise NotImplementedError()
@abc.abstractproperty
def primary_hashes_to_lock(self) -> Collection[str]:
raise NotImplementedError()
@abc.abstractmethod
def start_snuba_replacement(
self, project: Project, source_id: int, destination_id: int
) -> EventstreamState:
raise NotImplementedError()
@abc.abstractmethod
def stop_snuba_replacement(self, eventstream_state: EventstreamState) -> None:
raise NotImplementedError()
@abc.abstractmethod
def run_postgres_replacement(
self, project: Project, destination_id: int, locked_primary_hashes: Collection[str]
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def get_activity_args(self) -> Mapping[str, Any]:
raise NotImplementedError()
@dataclass(frozen=True)
| UnmergeReplacement |
python | Lightning-AI__lightning | src/lightning/pytorch/tuner/lr_finder.py | {
"start": 14301,
"end": 18133
} | class ____(Callback):
"""Special callback used by the learning rate finder. This callback logs the learning rate before each batch and
logs the corresponding loss after each batch.
Args:
num_training: number of iterations done by the learning rate finder
early_stop_threshold: threshold for stopping the search. If the
loss at any point is larger than ``early_stop_threshold*best_loss``
then the search is stopped. To disable, set to ``None``.
progress_bar_refresh_rate: rate to refresh the progress bar for
the learning rate finder
beta: smoothing value, the loss being logged is a running average of
loss values logged until now. ``beta`` controls the forget rate i.e.
if ``beta=0`` all past information is ignored.
"""
def __init__(
self,
num_training: int,
early_stop_threshold: Optional[float] = 4.0,
progress_bar_refresh_rate: int = 0,
beta: float = 0.98,
):
self.num_training = num_training
self.early_stop_threshold = early_stop_threshold
self.beta = beta
self.losses: list[float] = []
self.lrs: list[float] = []
self.avg_loss = 0.0
self.best_loss = 0.0
self.progress_bar_refresh_rate = progress_bar_refresh_rate
self.progress_bar = None
@override
def on_train_batch_start(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch: Any, batch_idx: int
) -> None:
"""Called before each training batch, logs the lr that will be used."""
if (trainer.fit_loop.batch_idx + 1) % trainer.accumulate_grad_batches != 0:
return
if self.progress_bar_refresh_rate and self.progress_bar is None:
self.progress_bar = tqdm(desc="Finding best initial lr", total=self.num_training)
self.lrs.append(trainer.lr_scheduler_configs[0].scheduler.lr[0]) # type: ignore[union-attr]
@override
def on_train_batch_end(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
) -> None:
"""Called when the training batch ends, logs the calculated loss."""
if (trainer.fit_loop.batch_idx + 1) % trainer.accumulate_grad_batches != 0:
return
# _AutomaticOptimization.run turns None STEP_OUTPUT into an empty dict
if not outputs:
# need to add an element, because we also added one element to lrs in on_train_batch_start
# so add nan, because they are not considered when computing the suggestion
self.losses.append(float("nan"))
return
if self.progress_bar:
self.progress_bar.update()
loss_tensor = outputs if isinstance(outputs, torch.Tensor) else outputs["loss"]
assert loss_tensor is not None
current_loss = loss_tensor.item()
current_step = trainer.global_step
# Avg loss (loss with momentum) + smoothing
self.avg_loss = self.beta * self.avg_loss + (1 - self.beta) * current_loss
smoothed_loss = self.avg_loss / (1 - self.beta ** (current_step + 1))
# Check if we diverging
if (
self.early_stop_threshold is not None
and current_step > 1
and smoothed_loss > self.early_stop_threshold * self.best_loss
):
trainer.should_stop = True # stop signal
if self.progress_bar:
self.progress_bar.close()
trainer.should_stop = trainer.strategy.broadcast(trainer.should_stop)
# Save best loss for diverging checking
if smoothed_loss < self.best_loss or current_step == 1:
self.best_loss = smoothed_loss
self.losses.append(smoothed_loss)
| _LRCallback |
python | huggingface__transformers | src/transformers/models/lfm2_vl/modular_lfm2_vl.py | {
"start": 3028,
"end": 9030
} | class ____(LlavaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Lfm2VlConfig):
super().__init__(config)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
) -> list[torch.Tensor]:
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`):
The pixel attention mask of the input images.
Returns:
image_features (`list[torch.Tensor]`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
image_outputs = self.vision_tower(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
).last_hidden_state
img_feature_lengths = pixel_attention_mask.sum(dim=1)
image_features = []
for img_idx in range(image_outputs.size(0)):
feature = image_outputs[img_idx]
# unpad the image representation
feature = feature[: img_feature_lengths[img_idx], :].unsqueeze(0)
# reshape to original height and width
feature_org_h, feature_org_w = spatial_shapes[img_idx]
feature = feature.reshape(1, feature_org_h, feature_org_w, -1)
# project the image representation
img_embedding = self.multi_modal_projector(feature)
# flatten here to handle variable length in naflex
img_embedding = img_embedding.reshape(-1, img_embedding.size(-1))
image_features.append(img_embedding)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlModelOutputWithPast]:
r"""
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
image_features=image_features,
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return Lfm2VlModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
| Lfm2VlModel |
python | django__django | tests/admin_filters/tests.py | {
"start": 2262,
"end": 2409
} | class ____(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
| DecadeListFilterWithNoneReturningLookups |
python | pytorch__pytorch | test/fx/test_fx_param_shape_control_flow.py | {
"start": 751,
"end": 990
} | class ____(MyModuleBase):
def __init__(self, in_channels):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
def no_relu(self):
return self.param.size()[0] < 10
| MyModuleParamSize |
python | pypa__build | src/build/env.py | {
"start": 4332,
"end": 4618
} | class ____(typing.Protocol): # pragma: no cover
python_executable: str
scripts_dir: str
def create(self, path: str) -> None: ...
def install_requirements(self, requirements: Collection[str]) -> None: ...
@property
def display_name(self) -> str: ...
| _EnvBackend |
python | altair-viz__altair | altair/datasets/_reader.py | {
"start": 12629,
"end": 18938
} | class ____(Reader[IntoDataFrameT, IntoFrameT]):
def __repr__(self) -> str:
return f"{super().__repr__()}\ncsv_cache\n {self.csv_cache!r}"
@property
def csv_cache(self) -> CsvCache:
if not hasattr(self, "_csv_cache"):
self._csv_cache = CsvCache()
return self._csv_cache
@property
def _metadata_frame(self) -> nw.LazyFrame[IntoFrameT]:
data = cast("dict[str, Any]", self.csv_cache.rotated)
impl = self._implementation
return nw.maybe_convert_dtypes(nw.from_dict(data, backend=impl)).lazy()
@overload
def reader(
read_fns: Sequence[Read[IntoDataFrameT]],
scan_fns: tuple[()] = ...,
*,
name: str | None = ...,
implementation: nw.Implementation = ...,
) -> Reader[IntoDataFrameT, nw.LazyFrame[IntoDataFrameT]]: ...
@overload
def reader(
read_fns: Sequence[Read[IntoDataFrameT]],
scan_fns: Sequence[Scan[IntoFrameT]],
*,
name: str | None = ...,
implementation: nw.Implementation = ...,
) -> Reader[IntoDataFrameT, IntoFrameT]: ...
def reader(
read_fns: Sequence[Read[IntoDataFrameT]],
scan_fns: Sequence[Scan[IntoFrameT]] = (),
*,
name: str | None = None,
implementation: nw.Implementation = nw.Implementation.UNKNOWN,
) -> (
Reader[IntoDataFrameT, IntoFrameT]
| Reader[IntoDataFrameT, nw.LazyFrame[IntoDataFrameT]]
):
name = name or Counter(el._inferred_package for el in read_fns).most_common(1)[0][0]
if implementation is nw.Implementation.UNKNOWN:
implementation = _into_implementation(Requirement(name))
if scan_fns:
return Reader(read_fns, scan_fns, name, implementation)
if stolen := _steal_eager_parquet(read_fns):
return Reader(read_fns, stolen, name, implementation)
else:
return _NoParquetReader[IntoDataFrameT](read_fns, (), name, implementation)
def infer_backend(
*, priority: Sequence[_Backend] = ("polars", "pandas[pyarrow]", "pandas", "pyarrow")
) -> Reader[Any, Any]:
"""
Return the first available reader in order of `priority`.
Notes
-----
- ``"polars"``: can natively load every dataset (including ``(Geo|Topo)JSON``)
- ``"pandas[pyarrow]"``: can load *most* datasets, guarantees ``.parquet`` support
- ``"pandas"``: supports ``.parquet``, if `fastparquet`_ is installed
- ``"pyarrow"``: least reliable
.. _fastparquet:
https://github.com/dask/fastparquet
"""
it = (_from_backend(name) for name in priority if is_available(_requirements(name)))
if reader := next(it, None):
return reader
raise AltairDatasetsError.from_priority(priority)
@overload
def _from_backend(name: _Polars, /) -> Reader[pl.DataFrame, pl.LazyFrame]: ...
@overload
def _from_backend(name: _PandasAny, /) -> Reader[pd.DataFrame, pd.DataFrame]: ...
@overload
def _from_backend(name: _PyArrow, /) -> Reader[pa.Table, pa.Table]: ...
# FIXME: The order this is defined in makes splitting the module complicated
# - Can't use a classmethod, since some result in a subclass used
def _from_backend(name: _Backend, /) -> Reader[Any, Any]:
"""
Reader initialization dispatcher.
FIXME: Works, but defining these in mixed shape functions seems off.
"""
if not _is_backend(name):
msg = f"Unknown backend {name!r}"
raise TypeError(msg)
implementation = _into_implementation(name)
if name == "polars":
rd, sc = _readimpl.pl_only()
return reader(rd, sc, name=name, implementation=implementation)
elif name == "pandas[pyarrow]":
return reader(_readimpl.pd_pyarrow(), name=name, implementation=implementation)
elif name == "pandas":
return reader(_readimpl.pd_only(), name=name, implementation=implementation)
elif name == "pyarrow":
return reader(_readimpl.pa_any(), name=name, implementation=implementation)
def _is_backend(obj: Any) -> TypeIs[_Backend]:
return obj in {"polars", "pandas", "pandas[pyarrow]", "pyarrow"}
def _is_err(obj: Any) -> TypeIs[type[AltairDatasetsError]]:
return obj is AltairDatasetsError
def _into_constraints(
name: Dataset | LiteralString, suffix: Extension | None, /
) -> Metadata:
"""Transform args into a mapping to column names."""
m: Metadata = {}
if "." in name:
m["file_name"] = name
elif suffix is None:
m["dataset_name"] = name
elif suffix.startswith("."):
m = {"dataset_name": name, "suffix": suffix}
else:
from typing import get_args
from altair.datasets._typing import Extension
msg = (
f"Expected 'suffix' to be one of {get_args(Extension)!r},\n"
f"but got: {suffix!r}"
)
raise TypeError(msg)
return m
def _into_implementation(
backend: _NwSupport | _PandasAny | Requirement, /
) -> nw.Implementation:
primary = _import_guarded(backend)
impl = nw.Implementation.from_backend(primary)
if impl is not nw.Implementation.UNKNOWN:
return impl
msg = f"Package {primary!r} is not supported by `narwhals`."
raise ValueError(msg)
def _into_suffix(obj: Path | str, /) -> Any:
if isinstance(obj, Path):
return obj.suffix
elif isinstance(obj, str):
return obj
else:
msg = f"Unexpected type {type(obj).__name__!r}"
raise TypeError(msg)
def _steal_eager_parquet(
read_fns: Sequence[Read[IntoDataFrameT]], /
) -> Sequence[Scan[nw.LazyFrame[IntoDataFrameT]]] | None:
if convertable := next((rd for rd in read_fns if rd.include <= is_parquet), None):
return (_readimpl.into_scan(convertable),)
return None
@overload
def _import_guarded(req: _PandasAny, /) -> _Pandas: ...
@overload
def _import_guarded(req: _NwSupportT, /) -> _NwSupportT: ...
@overload
def _import_guarded(req: Requirement, /) -> LiteralString: ...
def _import_guarded(req: Any, /) -> LiteralString:
requires = _requirements(req)
for name in requires:
if spec := find_spec(name):
import_module(spec.name)
else:
raise module_not_found(str(req), requires, missing=name)
return requires[0]
def _requirements(req: Requirement | str, /) -> tuple[Any, ...]:
req = Requirement(req) if isinstance(req, str) else req
return (req.name, *req.extras)
| _NoParquetReader |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/serialization.py | {
"start": 979,
"end": 3717
} | class ____(metaclass=abc.ABCMeta):
"""TraceTypes implementing this additional interface are portable."""
@classmethod
@abc.abstractmethod
def experimental_type_proto(cls) -> Type[message.Message]:
"""Returns the unique type of proto associated with this class."""
raise NotImplementedError
@classmethod
@abc.abstractmethod
def experimental_from_proto(cls, proto: message.Message) -> "Serializable":
"""Returns an instance based on a proto."""
raise NotImplementedError
@abc.abstractmethod
def experimental_as_proto(self) -> message.Message:
"""Returns a proto representing this instance."""
raise NotImplementedError
def register_serializable(cls: Type[Serializable]):
"""Registers a Python class to support serialization.
Only register standard TF types. Custom types should NOT be registered.
Args:
cls: Python class to register.
"""
if cls.experimental_type_proto() in PROTO_CLASS_TO_PY_CLASS:
raise ValueError(
"Existing Python class " +
PROTO_CLASS_TO_PY_CLASS[cls.experimental_type_proto()].__name__ +
" already has " + cls.experimental_type_proto().__name__ +
" as its associated proto representation. Please ensure " +
cls.__name__ + " has a unique proto representation.")
PROTO_CLASS_TO_PY_CLASS[cls.experimental_type_proto()] = cls
def serialize(to_serialize: Serializable) -> SerializedTraceType:
"""Converts Serializable to a proto SerializedTraceType."""
if not isinstance(to_serialize, Serializable):
raise ValueError("Can not serialize " + type(to_serialize).__name__ +
" since it is not Serializable. For object " +
str(to_serialize))
actual_proto = to_serialize.experimental_as_proto()
if not isinstance(actual_proto, to_serialize.experimental_type_proto()):
raise ValueError(
type(to_serialize).__name__ +
" returned different type of proto than specified by " +
"experimental_type_proto()")
serialized = SerializedTraceType()
serialized.representation.Pack(actual_proto)
return serialized
def deserialize(proto: SerializedTraceType) -> Serializable:
"""Converts a proto SerializedTraceType to instance of Serializable."""
for proto_class in PROTO_CLASS_TO_PY_CLASS:
if proto.representation.Is(proto_class.DESCRIPTOR):
actual_proto = proto_class()
proto.representation.Unpack(actual_proto)
return PROTO_CLASS_TO_PY_CLASS[proto_class].experimental_from_proto(
actual_proto)
raise ValueError(
"Can not deserialize proto of url: ", proto.representation.type_url,
" since no matching Python class could be found. For value ",
proto.representation.value)
| Serializable |
python | huggingface__transformers | src/transformers/models/deit/image_processing_deit.py | {
"start": 1438,
"end": 15081
} | class ____(BaseImageProcessor):
r"""
Constructs a DeiT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling` filter, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PIL.Image.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
rescale_factor: Union[int, float] = 1 / 255,
do_rescale: bool = True,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 256, "width": 256}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample=None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to
`True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be
padded with zeros and then cropped
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- `None`: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["DeiTImageProcessor"]
| DeiTImageProcessor |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 11045,
"end": 11897
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'pkg_mgr']
valid_subsets = ['pkg_mgr']
fact_namespace = 'ansible_pkgmgr'
collector_class = PkgMgrFactCollector
collected_facts = {
"ansible_distribution": "Fedora",
"ansible_distribution_major_version": "28",
"ansible_os_family": "RedHat",
"ansible_pkg_mgr": "apt"
}
@patch('ansible.module_utils.facts.system.pkg_mgr.os.path.exists', side_effect=_sanitize_os_path_apt_get)
def test_collect(self, mock_os_path_exists):
module = self._mock_module()
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertIn('pkg_mgr', facts_dict)
| TestPkgMgrFactsAptFedora |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/utils_tests/test_dataloader.py | {
"start": 1025,
"end": 3854
} | class ____(DataLoader[str, Thing]):
def __init__(self):
super().__init__(batch_load_fn=batch_load_fn) # pyright: ignore[reportArgumentType]
def test_basic() -> None:
async def two_round_trips(loader: ThingLoader, key: str):
thing = await loader.load(key)
repeat = await loader.load(thing.key)
# test caching
assert repeat is thing
return thing
async def main() -> None:
loader = ThingLoader()
thing1, thing2, thing3 = await asyncio.gather(
two_round_trips(loader, "key_0"),
two_round_trips(loader, "key_1"),
two_round_trips(loader, "key_2"),
)
keys = ["key_0", "key_1", "key_2"]
# test batching
assert thing1.batch_keys == keys
assert thing2.batch_keys == keys
assert thing3.batch_keys == keys
asyncio.run(main())
def test_event_loop_change() -> None:
loader_cache = {}
async def _load_memoized(k):
if "thing" not in loader_cache:
loader_cache["thing"] = ThingLoader()
return await loader_cache["thing"].load(k)
# first one is fine
result = asyncio.run(_load_memoized("test_m"))
assert result.key
# second throws
with pytest.raises(Exception, match="event loop has changed"):
_ = asyncio.run(_load_memoized("test_m_2"))
def test_exception() -> None:
class TestException(Exception): ...
async def batch_load_fn(keys: list[str]):
raise TestException()
class Thrower(DataLoader[str, str]):
def __init__(self):
super().__init__(batch_load_fn=batch_load_fn) # pyright: ignore[reportArgumentType]
async def _test():
loader = Thrower()
with pytest.raises(TestException):
_ = await loader.load("foo")
asyncio.run(_test())
def test_deep_tree():
async def _fetch(context: Context, key: str):
thing = await Thing.gen(context, key)
return await thing.gen_other_other_other_thing(ctx)
async def _test(context):
return await asyncio.gather(
_fetch(context, "one"),
_fetch(context, "two"),
)
ctx = Context()
one, two = asyncio.run(_test(ctx))
assert one.batch_keys == two.batch_keys
assert one.batch_keys == ["one_other_other_other", "two_other_other_other"]
def test_bad_load_fn():
async def _oops(wrong, args, here): ...
async def _test():
loader = DataLoader(_oops) # pyright: ignore[reportArgumentType]
done, pending = await asyncio.wait(
(loader.load(1),),
timeout=0.01,
)
assert not pending
assert len(done) == 1
with pytest.raises(TypeError):
done[0].result() # pyright: ignore[reportIndexIssue]
asyncio.run(_test())
| ThingLoader |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 13899,
"end": 15516
} | class ____(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(1 - y_true * y_pred, 0)
```
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="hinge",
dtype=None,
):
super().__init__(hinge, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.SquaredHinge")
| Hinge |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py | {
"start": 1670,
"end": 3956
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-detector-details"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.environment = self.create_environment(
organization_id=self.organization.id, name="production"
)
with self.tasks():
self.snuba_query = create_snuba_query(
query_type=SnubaQuery.Type.ERROR,
dataset=Dataset.Events,
query="hello",
aggregate="count()",
time_window=timedelta(minutes=1),
resolution=timedelta(minutes=1),
environment=self.environment,
event_types=[SnubaQueryEventType.EventType.ERROR],
)
self.query_subscription = create_snuba_subscription(
project=self.project,
subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
snuba_query=self.snuba_query,
)
self.data_source = self.create_data_source(
organization=self.organization, source_id=self.query_subscription.id
)
self.data_condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ANY,
)
self.condition = self.create_data_condition(
condition_group=self.data_condition_group,
type=Condition.LESS,
comparison=50,
condition_result=DetectorPriorityLevel.LOW,
)
self.resolve_condition = self.create_data_condition(
condition_group=self.data_condition_group,
type=Condition.GREATER_OR_EQUAL,
comparison=50,
condition_result=DetectorPriorityLevel.OK,
)
self.detector = self.create_detector(
project=self.project,
name="Test Detector",
type=MetricIssue.slug,
workflow_condition_group=self.data_condition_group,
)
self.data_source_detector = self.create_data_source_detector(
data_source=self.data_source, detector=self.detector
)
assert self.detector.data_sources is not None
@region_silo_test
| OrganizationDetectorDetailsBaseTest |
python | bokeh__bokeh | src/bokeh/models/ui/panels.py | {
"start": 1568,
"end": 3188
} | class ____(Pane):
""" A DOM-based UI element that allows for controlling its bounding box. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
position = Required(Instance(Coordinate), help="""
A computed coordinate representing the position of this panel, either
with respect to its parent or the viewport of a web browser.
""")
anchor = Anchor(default="top_left", help="""
The anchor point this panel is positioned at.
This can be either a named anchor like ``"top_left"`` or ``"center"``,
or a tuple of named positions or percentages along the axes of the panel
""")
width = Either(Auto, Int, Instance(Node), help="""
A computed value defining the width of the panel.
Use ``"auto"`` to let CSS determine the width (based on a stylesheet).
""")
height = Either(Auto, Int, Instance(Node), help="""
A computed value defining the height of the panel.
Use ``"auto"`` to let CSS determine the height (based on a stylesheet).
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Panel |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 104954,
"end": 106204
} | class ____(Response):
"""
Response of events.next_debug_image_sample endpoint.
"""
_service = "events"
_action = "next_debug_image_sample"
_version = "2.13"
_schema = {
"$ref": "#/definitions/debug_image_sample_reposnse",
"definitions": {
"debug_image_sample_reposnse": {
"properties": {
"event": {
"description": "Debugimageevent",
"type": ["object", "null"],
},
"max_iteration": {
"description": "maximalvaliditerationforthevariant",
"type": ["integer", "null"],
},
"min_iteration": {
"description": "minimalvaliditerationforthevariant",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "ScrollIDtopasstothenextcallstoget_debug_image_sampleornext_debug_image_sample",
"type": ["string", "null"],
},
},
"type": "object",
}
},
}
| NextDebugImageSampleResponse |
python | keras-team__keras | keras/src/distribution/distribution_lib_test.py | {
"start": 506,
"end": 1867
} | class ____(testing.TestCase):
def tearDown(self):
super().tearDown()
os.environ.clear()
def test_initialize_with_explicit_param(self, mock_backend_initialize):
job_addresses = "10.0.0.1:1234,10.0.0.2:2345"
num_processes = 2
current_process_id = 0
distribution_lib.initialize(
job_addresses, num_processes, current_process_id
)
mock_backend_initialize.assert_called_once_with(
job_addresses, num_processes, current_process_id
)
def test_initialize_with_env_vars(self, mock_backend_initialize):
job_addresses = "10.0.0.1:1234,10.0.0.2:2345"
num_processes = 2
current_process_id = 0
os.environ["KERAS_DISTRIBUTION_JOB_ADDRESSES"] = job_addresses
os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"] = str(num_processes)
os.environ["KERAS_DISTRIBUTION_PROCESS_ID"] = str(current_process_id)
distribution_lib.initialize()
mock_backend_initialize.assert_called_once_with(
job_addresses, num_processes, current_process_id
)
def test_init_with_nones(self, mock_backend_initialize):
# This is also valid case for Cloud TPU on JAX
distribution_lib.initialize()
mock_backend_initialize.assert_called_once_with(None, None, None)
| MultiProcessInitializeTest |
python | jina-ai__jina | jina/jaml/__init__.py | {
"start": 1752,
"end": 17578
} | class ____:
"""A Jina YAML parser supports loading and dumping and substituting variables.
To use it:
.. highlight:: python
.. code-block:: python
from jina.jaml import JAML
JAML.load(...)
JAML.dump(...)
class DummyClass:
pass
JAML.register(DummyClass)
You can use expressions to programmatically set variables in YAML files and access contexts.
An expression can be any combination of literal values, references to a context, or functions.
You can combine literals, context references, and functions using operators.
You need to use specific syntax to tell Jina to evaluate an expression rather than treat it as a string,
which is based on GitHub actions syntax, and looks like this:
.. highlight:: yaml
.. code-block:: yaml
${{ <expression> }}
This expression can be evaluated directly (i.e. substituted by the real value) when being loaded,
by using :meth:`load(substitute=True)`
JAML supports three different kinds of variables to be used as expressions: `Environment variables`
(coming form the environment itself), `context variables` (being passed as a dict),
and `internal references` (included in the .yaml file itself).
An environment variable `var` is accessed through the following syntax:
.. highlight:: yaml
.. code-block:: yaml
${{ env.var }}
Note the mandatory spaces before and after the variable denotation.
Context variables can be accessed using the following syntax:
.. highlight:: yaml
.. code-block:: yaml
${{ context_var }}
Or, if you want to be explicit:
.. highlight:: yaml
.. code-block:: yaml
${{ context.context_var }}
These context variables are passed as a dict:
.. highlight:: python
.. code-block:: python
obj = JAML.load(
fp, substitute=True, context={'context_var': 3.14, 'context_var2': 'hello-world'}
)
Internal references point to other variables in the yaml file itself, and can be accessed using the following syntax:
.. highlight:: yaml
.. code-block:: yaml
${{root.path.to.var}}
Note omission of spaces in this syntax.
.. note::
:class:`BaseFlow`, :class:`BaseExecutor`, :class:`BaseGateway`
and all their subclasses have already implemented JAML interfaces,
to load YAML config into objects, please use :meth:`Flow.load_config`,
:meth:`BaseExecutor.load_config`, etc.
"""
@staticmethod
def load(
stream,
substitute: bool = False,
context: Dict[str, Any] = None,
runtime_args: Optional[Dict[str, Any]] = None,
):
"""Parse the first YAML document in a stream and produce the corresponding Python object.
.. note::
:class:`BaseFlow`, :class:`BaseExecutor`, :class:`BaseGateway`
and all their subclasses have already implemented JAML interfaces,
to load YAML config into objects, please use :meth:`Flow.load_config`,
:meth:`BaseExecutor.load_config`, etc.
:param stream: the stream to load
:param substitute: substitute environment, internal reference and context variables.
:param context: context replacement variables in a dict, the value of the dict is the replacement.
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Python object
"""
r = yaml.load(stream, Loader=get_jina_loader_with_runtime(runtime_args))
if substitute:
r = JAML.expand_dict(r, context)
return r
@staticmethod
def escape(value: str, include_unknown_tags: bool = True) -> str:
"""
Escape the YAML content by replacing all customized tags ``!`` to ``jtype: ``.
:param value: the original YAML content
:param include_unknown_tags: if to include unknown tags during escaping
:return: escaped YAML
"""
if include_unknown_tags:
r = r'!(\w+)\b'
else:
r = '|'.join(JAML.registered_tags())
r = rf'!({r})\b'
return re.sub(r, r'jtype: \1', value)
@staticmethod
def unescape(
value: str,
include_unknown_tags: bool = True,
jtype_whitelist: Tuple[str, ...] = None,
) -> str:
"""
Unescape the YAML content by replacing all ``jtype: `` to tags.
:param value: the escaped YAML content
:param include_unknown_tags: if to include unknown tags during unescaping
:param jtype_whitelist: the list of jtype to be unescaped
:return: unescaped YAML
"""
if include_unknown_tags:
r = r'jtype: (\w+)\b'
elif jtype_whitelist:
r = '|'.join(jtype_whitelist)
r = rf'jtype: ({r})\b'
else:
r = '|'.join(JAML.registered_tags())
r = rf'jtype: ({r})\b'
return re.sub(r, r'!\1', value)
@staticmethod
def registered_tags() -> List[str]:
"""
Return a list of :class:`JAMLCompatible` classes that have been registered.
:return: tags
"""
return list(
v[1:]
for v in set(JinaLoader.yaml_constructors.keys())
if v and v.startswith('!')
)
@staticmethod
def registered_classes() -> Dict:
"""
Return a dict of tags and :class:`JAMLCompatible` classes that have been registered.
:return: tags and classes
"""
return {
k[1:]: v
for k, v in JinaLoader.yaml_constructors.items()
if k and k.startswith('!')
}
@staticmethod
def cls_from_tag(tag: str) -> Optional['JAMLCompatible']:
"""Fetch class from yaml tag
:param tag: yaml tag
:return: class object from tag
"""
if not tag.startswith('!'):
tag = '!' + tag
bound = JinaLoader.yaml_constructors.get(tag, None)
return bound.__self__ if bound else None
@staticmethod
def load_no_tags(stream, **kwargs):
"""
Load yaml object but ignore all customized tags, e.g. !Executor, !Driver, !Flow.
:param stream: the output stream
:param kwargs: other kwargs
:return: the Python object
"""
safe_yml = JAML.escape('\n'.join(v for v in stream))
return JAML.load(safe_yml, **kwargs)
@staticmethod
def expand_dict(
d: Dict,
context: Optional[Union[Dict, SimpleNamespace]] = None,
resolve_cycle_ref=True,
resolve_passes: int = 3,
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: yaml file loaded as python dict
:param context: context replacement variables in a dict, the value of the dict is the replacement.
:param resolve_cycle_ref: resolve internal reference if True.
:param resolve_passes: number of rounds to resolve internal reference.
:return: expanded dict.
"""
from jina.helper import parse_arg
expand_map = SimpleNamespace()
env_map = SimpleNamespace()
def _scan(sub_d, p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d, p, resolve_ref=False):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k], resolve_ref)
else:
if isinstance(v, str):
if resolve_ref and yaml_ref_regex.findall(v):
sub_d[k] = _resolve_yaml_reference(v, p)
else:
sub_d[k] = _sub(v)
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx], resolve_ref)
else:
if isinstance(v, str):
if resolve_ref and yaml_ref_regex.findall(v):
sub_d[idx] = _resolve_yaml_reference(v, p)
else:
sub_d[idx] = _sub(v)
def _var_to_substitutable(v, exp=context_var_regex):
def repl_fn(matchobj):
return '$$' + matchobj.group(0)[4:-3]
return re.sub(exp, repl_fn, v)
def _to_env_var_synatx(v):
v = _var_to_substitutable(v, new_env_var_regex)
def repl_fn(matchobj):
match_str = matchobj.group(0)
match_str = match_str.replace('ENV.', '')
match_str = match_str.replace('env.', '')
return match_str[1:]
return re.sub(r'\$\$[a-zA-Z0-9_.]*', repl_fn, v)
def _to_normal_context_var(v):
def repl_fn(matchobj):
match_str = matchobj.group(0)
match_str = match_str.replace('CONTEXT.', '')
match_str = match_str.replace('context.', '')
return match_str
return re.sub(context_dot_regex, repl_fn, v)
def _sub(v):
# substitute template with actual value either from context or env variable
# v could contain template of the form
#
# 1) ${{ var }},${{ context.var }},${{ CONTEXT.var }} when need to be parsed with the context dict
# or
# 2 ) ${{ ENV.var }},${{ env.var }},$var ( deprecated) when need to be parsed with env
#
#
# internally env var (1) and context var (2) are treated differently, both of them are cast to a unique and
# normalize template format and then are parsed
# 1) context variables placeholder are cast to $$var then we use the ContextVarTemplate to parse the context
# variables
# 2) env variables placeholder are cast to $var then we leverage the os.path.expandvars to replace by
# environment variables.
if env_var_deprecated_regex.findall(v) and not env_var_regex.findall(
v
): # catch expressions of form '$var'
warnings.warn(
'Specifying environment variables via the syntax `$var` is deprecated.'
'Use `${{ ENV.var }}` instead.',
category=DeprecationWarning,
)
if new_env_var_regex.findall(
v
): # handle expressions of form '${{ ENV.var}}',
v = _to_env_var_synatx(v)
if context_dot_regex.findall(v):
v = _to_normal_context_var(v)
if context_var_regex.findall(v): # handle expressions of form '${{ var }}'
v = _var_to_substitutable(v)
if context:
v = ContextVarTemplate(v).safe_substitute(
context
) # use vars provided in context
v = os.path.expandvars(
v
) # gets env var and parses to python objects if neededd
return parse_arg(v)
def _resolve_yaml_reference(v, p):
org_v = v
# internal references are of the form ${{path}} where path is a yaml path like root.executors[0].name
def repl_fn(matchobj):
match_str = matchobj.group(0)
match_str_origin = match_str
match_str = re.sub(
yaml_ref_regex, '{\\1}', match_str
) # from ${{var}} to {var} to leverage python formatter
try:
# "root" context is now the global namespace
# "this" context is now the current node namespace
match_str = match_str.format(root=expand_map, this=p, ENV=env_map)
except AttributeError as ex:
raise AttributeError(
'variable replacement is failed, please check your YAML file.'
) from ex
except KeyError:
return match_str_origin
return match_str
v = re.sub(yaml_ref_regex, repl_fn, v)
return parse_arg(v)
_scan(d, expand_map)
_scan(dict(os.environ), env_map)
# first do var replacement
_replace(d, expand_map)
# do `resolve_passes` rounds of scan-replace to resolve internal references
for _ in range(resolve_passes):
# rebuild expand_map
expand_map = SimpleNamespace()
_scan(d, expand_map)
# resolve internal reference
if resolve_cycle_ref:
_replace(d, expand_map, resolve_ref=True)
return d
@staticmethod
def dump(data, stream=None, **kwargs):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
:param data: the data to serialize
:param stream: the output stream
:param kwargs: other kwargs
:return: the yaml output
"""
return yaml.dump(
data, stream=stream, default_flow_style=False, sort_keys=False, **kwargs
)
@staticmethod
def register(cls):
"""
Register a class for dumping loading.
- if it has attribute yaml_tag use that to register, else use class name
- if it has methods to_yaml/from_yaml use those to dump/load else dump attributes
as mapping
:param cls: the class to register
:return: the registered class
"""
tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
try:
yaml.add_representer(cls, cls._to_yaml)
except AttributeError:
def t_y(representer, data):
"""
Wrapper function for the representer.
:param representer: yaml representer
:param data: state of the representer
:return: node
"""
return representer.represent_yaml_object(
tag, data, cls, flow_style=representer.default_flow_style
)
yaml.add_representer(cls, t_y)
try:
yaml.add_constructor(tag, cls._from_yaml, JinaLoader)
except AttributeError:
def f_y(constructor, node):
"""
Wrapper function for the constructor.
:param constructor: yaml constructor
:param node: to be added
:return: generator
"""
return constructor.construct_yaml_object(node, cls)
yaml.add_constructor(tag, f_y, JinaLoader)
return cls
| JAML |
python | mahmoud__glom | glom/mutation.py | {
"start": 8551,
"end": 12874
} | class ____:
"""
In addition to glom's core "deep-get" and ``Assign``'s "deep-set",
the ``Delete`` specifier type performs a "deep-del", which can
remove items from larger data structures by key, attribute, and
index.
>>> target = {'dict': {'x': [5, 6, 7]}}
>>> glom(target, Delete('dict.x.1'))
{'dict': {'x': [5, 7]}}
>>> glom(target, Delete('dict.x'))
{'dict': {}}
If a target path is missing, a :exc:`PathDeleteError` will be
raised. To ignore missing targets, use the ``ignore_missing``
flag:
>>> glom(target, Delete('does_not_exist', ignore_missing=True))
{'dict': {}}
``Delete`` has built-in support for deleting attributes of
objects, keys of dicts, and indexes of sequences
(like lists). Additional types can be registered through
:func:`~glom.register()` using the ``"delete"`` operation name.
.. versionadded:: 20.5.0
"""
def __init__(self, path, ignore_missing=False):
if isinstance(path, basestring):
path = Path.from_text(path)
elif type(path) is TType:
path = Path(path)
elif not isinstance(path, Path):
raise TypeError('path argument must be a .-delimited string, Path, T, or S')
try:
self.op, self.arg = path.items()[-1]
except IndexError:
raise ValueError('path must have at least one element')
self._orig_path = path
self.path = path[:-1]
if self.op not in '[.P':
raise ValueError('last part of path must be an attribute or index')
self.ignore_missing = ignore_missing
def _del_one(self, dest, op, arg, scope):
if op == '[':
try:
del dest[arg]
except IndexError as e:
if not self.ignore_missing:
raise PathDeleteError(e, self.path, arg)
elif op == '.':
try:
delattr(dest, arg)
except AttributeError as e:
if not self.ignore_missing:
raise PathDeleteError(e, self.path, arg)
elif op == 'P':
_delete = scope[TargetRegistry].get_handler('delete', dest)
try:
_delete(dest, arg)
except Exception as e:
if not self.ignore_missing:
raise PathDeleteError(e, self.path, arg)
def glomit(self, target, scope):
op, arg, path = self.op, self.arg, self.path
if self.path.startswith(S):
dest_target = scope[UP]
dest_path = self.path.from_t()
else:
dest_target = target
dest_path = self.path
try:
dest = scope[glom](dest_target, dest_path, scope)
except PathAccessError as pae:
if not self.ignore_missing:
raise
else:
_apply_for_each(lambda dest: self._del_one(dest, op, arg, scope), path, dest)
return target
def __repr__(self):
cn = self.__class__.__name__
return f'{cn}({self._orig_path!r})'
def delete(obj, path, ignore_missing=False):
"""
The ``delete()`` function provides "deep del" functionality,
modifying nested data structures in-place::
>>> target = {'a': [{'b': 'c'}, {'d': None}]}
>>> delete(target, 'a.0.b')
{'a': [{}, {'d': None}]}
Attempting to delete missing keys, attributes, and indexes will
raise a :exc:`PathDeleteError`. To ignore these errors, use the
*ignore_missing* argument::
>>> delete(target, 'does_not_exist', ignore_missing=True)
{'a': [{}, {'d': None}]}
For more information and examples, see the :class:`~glom.Delete`
specifier type, which this convenience function wraps.
.. versionadded:: 20.5.0
"""
return glom(obj, Delete(path, ignore_missing=ignore_missing))
def _del_sequence_item(target, idx):
del target[int(idx)]
def _delete_autodiscover(type_obj):
if issubclass(type_obj, _UNASSIGNABLE_BASE_TYPES):
return False
if callable(getattr(type_obj, '__delitem__', None)):
if callable(getattr(type_obj, 'index', None)):
return _del_sequence_item
return operator.delitem
return delattr
register_op('delete', auto_func=_delete_autodiscover, exact=False)
| Delete |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_extra/_lib/_utils/_helpers.py | {
"start": 15480,
"end": 18965
} | class ____(Generic[T]): # numpydoc ignore=PR01
"""
Helper of :func:`jax_autojit`.
Wrap arbitrary inputs and outputs of the jitted function and
convert them to/from PyTrees.
"""
obj: T
_registered: ClassVar[bool] = False
__slots__: tuple[str, ...] = ("obj",)
def __init__(self, obj: T) -> None: # numpydoc ignore=GL08
self._register()
self.obj = obj
@classmethod
def _register(cls) -> None: # numpydoc ignore=SS06
"""
Register upon first use instead of at import time, to avoid
globally importing JAX.
"""
if not cls._registered:
import jax
jax.tree_util.register_pytree_node(
cls,
lambda obj: pickle_flatten(obj, jax.Array), # pyright: ignore[reportUnknownArgumentType]
lambda aux_data, children: pickle_unflatten(children, aux_data), # pyright: ignore[reportUnknownArgumentType]
)
cls._registered = True
def jax_autojit(
func: Callable[P, T],
) -> Callable[P, T]: # numpydoc ignore=PR01,RT01,SS03
"""
Wrap `func` with ``jax.jit``, with the following differences:
- Python scalar arguments and return values are not automatically converted to
``jax.Array`` objects.
- All non-array arguments are automatically treated as static.
Unlike ``jax.jit``, static arguments must be either hashable or serializable with
``pickle``.
- Unlike ``jax.jit``, non-array arguments and return values are not limited to
tuple/list/dict, but can be any object serializable with ``pickle``.
- Automatically descend into non-array arguments and find ``jax.Array`` objects
inside them, then rebuild the arguments when entering `func`, swapping the JAX
concrete arrays with tracer objects.
- Automatically descend into non-array return values and find ``jax.Array`` objects
inside them, then rebuild them downstream of exiting the JIT, swapping the JAX
tracer objects with concrete arrays.
See Also
--------
jax.jit : JAX JIT compilation function.
Notes
-----
These are useful choices *for testing purposes only*, which is how this function is
intended to be used. The output of ``jax.jit`` is a C++ level callable, that
directly dispatches to the compiled kernel after the initial call. In comparison,
``jax_autojit`` incurs a much higher dispatch time.
Additionally, consider::
def f(x: Array, y: float, plus: bool) -> Array:
return x + y if plus else x - y
j1 = jax.jit(f, static_argnames="plus")
j2 = jax_autojit(f)
In the above example, ``j2`` requires a lot less setup to be tested effectively than
``j1``, but on the flip side it means that it will be re-traced for every different
value of ``y``, which likely makes it not fit for purpose in production.
"""
import jax
@jax.jit # type: ignore[misc] # pyright: ignore[reportUntypedFunctionDecorator]
def inner( # numpydoc ignore=GL08
wargs: _AutoJITWrapper[Any],
) -> _AutoJITWrapper[T]:
args, kwargs = wargs.obj
res = func(*args, **kwargs) # pyright: ignore[reportCallIssue]
return _AutoJITWrapper(res)
@wraps(func)
def outer(*args: P.args, **kwargs: P.kwargs) -> T: # numpydoc ignore=GL08
wargs = _AutoJITWrapper((args, kwargs))
return inner(wargs).obj
return outer
| _AutoJITWrapper |
python | fsspec__filesystem_spec | fsspec/implementations/cache_metadata.py | {
"start": 433,
"end": 8502
} | class ____:
"""Cache metadata.
All reading and writing of cache metadata is performed by this class,
accessing the cached files and blocks is not.
Metadata is stored in a single file per storage directory in JSON format.
For backward compatibility, also reads metadata stored in pickle format
which is converted to JSON when next saved.
"""
def __init__(self, storage: list[str]):
"""
Parameters
----------
storage: list[str]
Directories containing cached files, must be at least one. Metadata
is stored in the last of these directories by convention.
"""
if not storage:
raise ValueError("CacheMetadata expects at least one storage location")
self._storage = storage
self.cached_files: list[Detail] = [{}]
# Private attribute to force saving of metadata in pickle format rather than
# JSON for use in tests to confirm can read both pickle and JSON formats.
self._force_save_pickle = False
def _load(self, fn: str) -> Detail:
"""Low-level function to load metadata from specific file"""
try:
with open(fn, "r") as f:
loaded = json.load(f)
except ValueError:
with open(fn, "rb") as f:
loaded = pickle.load(f)
for c in loaded.values():
if isinstance(c.get("blocks"), list):
c["blocks"] = set(c["blocks"])
return loaded
def _save(self, metadata_to_save: Detail, fn: str) -> None:
"""Low-level function to save metadata to specific file"""
if self._force_save_pickle:
with atomic_write(fn) as f:
pickle.dump(metadata_to_save, f)
else:
with atomic_write(fn, mode="w") as f:
json.dump(metadata_to_save, f)
def _scan_locations(
self, writable_only: bool = False
) -> Iterator[tuple[str, str, bool]]:
"""Yield locations (filenames) where metadata is stored, and whether
writable or not.
Parameters
----------
writable: bool
Set to True to only yield writable locations.
Returns
-------
Yields (str, str, bool)
"""
n = len(self._storage)
for i, storage in enumerate(self._storage):
writable = i == n - 1
if writable_only and not writable:
continue
yield os.path.join(storage, "cache"), storage, writable
def check_file(
self, path: str, cfs: CachingFileSystem | None
) -> Literal[False] | tuple[Detail, str]:
"""If path is in cache return its details, otherwise return ``False``.
If the optional CachingFileSystem is specified then it is used to
perform extra checks to reject possible matches, such as if they are
too old.
"""
for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
if path not in cache:
continue
detail = cache[path].copy()
if cfs is not None:
if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
# Wrong file as determined by hash of file properties
continue
if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
# Cached file has expired
continue
fn = os.path.join(base, detail["fn"])
if os.path.exists(fn):
return detail, fn
return False
def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
"""Remove expired metadata from the cache.
Returns names of files corresponding to expired metadata and a boolean
flag indicating whether the writable cache is empty. Caller is
responsible for deleting the expired files.
"""
expired_files = []
for path, detail in self.cached_files[-1].copy().items():
if time.time() - detail["time"] > expiry_time:
fn = detail.get("fn", "")
if not fn:
raise RuntimeError(
f"Cache metadata does not contain 'fn' for {path}"
)
fn = os.path.join(self._storage[-1], fn)
expired_files.append(fn)
self.cached_files[-1].pop(path)
if self.cached_files[-1]:
cache_path = os.path.join(self._storage[-1], "cache")
self._save(self.cached_files[-1], cache_path)
writable_cache_empty = not self.cached_files[-1]
return expired_files, writable_cache_empty
def load(self) -> None:
"""Load all metadata from disk and store in ``self.cached_files``"""
cached_files = []
for fn, _, _ in self._scan_locations():
if os.path.exists(fn):
# TODO: consolidate blocks here
cached_files.append(self._load(fn))
else:
cached_files.append({})
self.cached_files = cached_files or [{}]
def on_close_cached_file(self, f: Any, path: str) -> None:
"""Perform side-effect actions on closing a cached file.
The actual closing of the file is the responsibility of the caller.
"""
# File must be writeble, so in self.cached_files[-1]
c = self.cached_files[-1][path]
if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
c["blocks"] = True
def pop_file(self, path: str) -> str | None:
"""Remove metadata of cached file.
If path is in the cache, return the filename of the cached file,
otherwise return ``None``. Caller is responsible for deleting the
cached file.
"""
details = self.check_file(path, None)
if not details:
return None
_, fn = details
if fn.startswith(self._storage[-1]):
self.cached_files[-1].pop(path)
self.save()
else:
raise PermissionError(
"Can only delete cached file in last, writable cache location"
)
return fn
def save(self) -> None:
"""Save metadata to disk"""
for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
if not writable:
continue
if os.path.exists(fn):
cached_files = self._load(fn)
for k, c in cached_files.items():
if k in cache:
if c["blocks"] is True or cache[k]["blocks"] is True:
c["blocks"] = True
else:
# self.cached_files[*][*]["blocks"] must continue to
# point to the same set object so that updates
# performed by MMapCache are propagated back to
# self.cached_files.
blocks = cache[k]["blocks"]
blocks.update(c["blocks"])
c["blocks"] = blocks
c["time"] = max(c["time"], cache[k]["time"])
c["uid"] = cache[k]["uid"]
# Files can be added to cache after it was written once
for k, c in cache.items():
if k not in cached_files:
cached_files[k] = c
else:
cached_files = cache
cache = {k: v.copy() for k, v in cached_files.items()}
for c in cache.values():
if isinstance(c["blocks"], set):
c["blocks"] = list(c["blocks"])
self._save(cache, fn)
self.cached_files[-1] = cached_files
def update_file(self, path: str, detail: Detail) -> None:
"""Update metadata for specific file in memory, do not save"""
self.cached_files[-1][path] = detail
| CacheMetadata |
python | redis__redis-py | tests/test_ssl.py | {
"start": 292,
"end": 15688
} | class ____:
"""Tests for SSL connections
This relies on the --redis-ssl-url purely for rebuilding the client
and connecting to the appropriate port.
"""
@pytest.fixture(autouse=True)
def _set_ssl_certs(self, request):
tls_cert_subdir = request.session.config.REDIS_INFO["tls_cert_subdir"]
self.client_certs = get_tls_certificates(tls_cert_subdir)
self.server_certs = get_tls_certificates(
tls_cert_subdir, cert_type=CertificateType.server
)
def test_ssl_with_invalid_cert(self, request):
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
sslclient.close()
def test_ssl_connection(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_cert_reqs="none",
)
assert r.ping()
r.close()
def test_ssl_connection_without_ssl(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=False)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "Connection closed by server" in str(e)
r.close()
def test_validating_self_signed_certificate(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.certfile,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_certs=self.client_certs.ca_certfile,
)
assert r.ping()
r.close()
def test_validating_self_signed_string_certificate(self, request):
with open(self.client_certs.ca_certfile) as f:
cert_data = f.read()
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.certfile,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_data=cert_data,
)
assert r.ping()
r.close()
@pytest.mark.parametrize(
"ssl_ciphers",
[
"AES256-SHA:DHE-RSA-AES256-SHA:AES128-SHA:DHE-RSA-AES128-SHA",
"DHE-RSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305",
],
)
def test_ssl_connection_tls12_custom_ciphers(self, request, ssl_ciphers):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_cert_reqs="none",
ssl_min_version=ssl.TLSVersion.TLSv1_3,
ssl_ciphers=ssl_ciphers,
)
assert r.ping()
r.close()
def test_ssl_connection_tls12_custom_ciphers_invalid(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_cert_reqs="none",
ssl_min_version=ssl.TLSVersion.TLSv1_2,
ssl_ciphers="foo:bar",
)
with pytest.raises(RedisError) as e:
r.ping()
assert "No cipher can be selected" in str(e)
r.close()
@pytest.mark.parametrize(
"ssl_ciphers",
[
"TLS_CHACHA20_POLY1305_SHA256",
"TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256",
],
)
def test_ssl_connection_tls13_custom_ciphers(self, request, ssl_ciphers):
# TLSv1.3 does not support changing the ciphers
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_cert_reqs="none",
ssl_min_version=ssl.TLSVersion.TLSv1_2,
ssl_ciphers=ssl_ciphers,
)
with pytest.raises(RedisError) as e:
r.ping()
assert "No cipher can be selected" in str(e)
r.close()
def _create_oscp_conn(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.certfile,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_certs=self.client_certs.ca_certfile,
ssl_validate_ocsp=True,
)
return r
@skip_if_cryptography()
def test_ssl_ocsp_called(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(RedisError) as e:
r.ping()
assert "cryptography is not installed" in str(e)
r.close()
@skip_if_nocryptography()
def test_ssl_ocsp_called_withcrypto(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(ConnectionError) as e:
assert r.ping()
assert "No AIA information present in ssl certificate" in str(e)
r.close()
@skip_if_nocryptography()
def test_valid_ocsp_cert_http(self):
from redis.ocsp import OCSPVerifier
hostnames = ["github.com", "aws.amazon.com", "ynet.co.il"]
for hostname in hostnames:
context = ssl.create_default_context()
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid()
@skip_if_nocryptography()
def test_revoked_ocsp_certificate(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "revoked.badssl.com"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError) as e:
assert ocsp.is_valid()
assert "REVOKED" in str(e)
@skip_if_nocryptography()
def test_unauthorized_ocsp(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "stackoverflow.com"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError):
ocsp.is_valid()
@skip_if_nocryptography()
def test_ocsp_not_present_in_response(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "google.co.il"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError) as e:
assert ocsp.is_valid()
assert "from the" in str(e)
@skip_if_nocryptography()
def test_unauthorized_then_direct(self):
from redis.ocsp import OCSPVerifier
# these certificates on the socket end return unauthorized
# then the second call succeeds
hostnames = ["wikipedia.org", "squarespace.com"]
for hostname in hostnames:
context = ssl.create_default_context()
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid()
@skip_if_nocryptography()
def test_mock_ocsp_staple(self, request):
import OpenSSL
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.cert,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_certs=self.client_certs.ca_certfile,
ssl_validate_ocsp=True,
ssl_ocsp_context=p, # just needs to not be none
)
with pytest.raises(RedisError):
r.ping()
r.close()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.use_certificate_file(self.client_certs.cert)
ctx.use_privatekey_file(self.client_certs.keyfile)
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.cert,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_certs=self.client_certs.ca_certfile,
ssl_ocsp_context=ctx,
ssl_ocsp_expected_cert=open(self.server_certs.ca_certfile, "rb").read(),
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "no ocsp response present" in str(e)
r.close()
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.client_certs.cert,
ssl_keyfile=self.client_certs.keyfile,
ssl_cert_reqs="required",
ssl_ca_certs=self.client_certs.ca_certfile,
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "no ocsp response present" in str(e)
r.close()
def test_cert_reqs_none_with_check_hostname(self, request):
"""Test that when ssl_cert_reqs=none is used with ssl_check_hostname=True,
the connection is created successfully with check_hostname internally set to False"""
ssl_url = request.config.option.redis_ssl_url
parsed_url = urlparse(ssl_url)
r = redis.Redis(
host=parsed_url.hostname,
port=parsed_url.port,
ssl=True,
ssl_cert_reqs="none",
# Check that ssl_check_hostname is ignored, when ssl_cert_reqs=none
ssl_check_hostname=True,
)
try:
# Connection should be successful
assert r.ping()
# check_hostname should have been automatically set to False
assert r.connection_pool.connection_class == redis.SSLConnection
conn = r.connection_pool.make_connection()
assert conn.check_hostname is False
finally:
r.close()
def test_ssl_verify_flags_applied_to_context(self, request):
"""
Test that ssl_include_verify_flags and ssl_exclude_verify_flags
are properly applied to the SSL context
"""
ssl_url = request.config.option.redis_ssl_url
parsed_url = urlparse(ssl_url)
# Test with specific SSL verify flags
ssl_include_verify_flags = [
ssl.VerifyFlags.VERIFY_CRL_CHECK_LEAF, # Disable strict verification
ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN, # Enable partial chain
]
ssl_exclude_verify_flags = [
ssl.VerifyFlags.VERIFY_X509_STRICT, # Disable trusted first
]
r = redis.Redis(
host=parsed_url.hostname,
port=parsed_url.port,
ssl=True,
ssl_cert_reqs="none",
ssl_include_verify_flags=ssl_include_verify_flags,
ssl_exclude_verify_flags=ssl_exclude_verify_flags,
)
try:
# Get the connection to trigger SSL context creation
conn = r.connection_pool.get_connection()
assert isinstance(conn, redis.SSLConnection)
# Verify the flags were processed by checking they're stored in connection
assert conn.ssl_include_verify_flags is not None
assert len(conn.ssl_include_verify_flags) == 2
assert conn.ssl_exclude_verify_flags is not None
assert len(conn.ssl_exclude_verify_flags) == 1
# Check each flag individually
for flag in ssl_include_verify_flags:
assert flag in conn.ssl_include_verify_flags, (
f"Flag {flag} not found in stored ssl_include_verify_flags"
)
for flag in ssl_exclude_verify_flags:
assert flag in conn.ssl_exclude_verify_flags, (
f"Flag {flag} not found in stored ssl_exclude_verify_flags"
)
# Test the actual SSL context created by the connection
# We need to create a mock socket and call _wrap_socket_with_ssl to get the context
import socket
import unittest.mock
mock_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Mock the wrap_socket method to capture the context
captured_context = None
def capture_context_wrap_socket(context_self, sock, **_kwargs):
nonlocal captured_context
captured_context = context_self
# Don't actually wrap the socket, just return the original socket
# to avoid connection errors
return sock
with unittest.mock.patch.object(
ssl.SSLContext, "wrap_socket", capture_context_wrap_socket
):
try:
conn._wrap_socket_with_ssl(mock_sock)
except Exception:
# We expect this to potentially fail since we're not actually connecting
# but we should have captured the context
pass
# Validate that we captured a context and it has the correct flags applied
assert captured_context is not None, "SSL context was not captured"
# Verify that VERIFY_X509_STRICT was disabled (bit cleared)
assert not (
captured_context.verify_flags & ssl.VerifyFlags.VERIFY_X509_STRICT
), "VERIFY_X509_STRICT should be disabled but is enabled"
# Verify that VERIFY_CRL_CHECK_CHAIN was enabled (bit set)
assert (
captured_context.verify_flags
& ssl.VerifyFlags.VERIFY_CRL_CHECK_CHAIN
), "VERIFY_CRL_CHECK_CHAIN should be enabled but is disabled"
finally:
mock_sock.close()
finally:
r.close()
| TestSSL |
python | sqlalchemy__sqlalchemy | test/orm/test_cycles.py | {
"start": 16936,
"end": 20026
} | class ____(fixtures.MappedTest):
"""Two mappers with a one-to-many relationship to each other,
with a second one-to-many on one of the mappers"""
run_define_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t2.c1")),
test_needs_autoincrement=True,
)
Table(
"t2",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t1.c1", name="t1c1_fq")),
test_needs_autoincrement=True,
)
Table(
"t1_data",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("t1id", Integer, ForeignKey("t1.c1")),
Column("data", String(20)),
test_needs_autoincrement=True,
)
@classmethod
def setup_classes(cls):
class C1(cls.Basic):
pass
class C2(cls.Basic):
pass
class C1Data(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
t2, t1, C1Data, t1_data, C2, C1 = (
cls.tables.t2,
cls.tables.t1,
cls.classes.C1Data,
cls.tables.t1_data,
cls.classes.C2,
cls.classes.C1,
)
cls.mapper_registry.map_imperatively(
C2,
t2,
properties={
"c1s": relationship(
C1, primaryjoin=t2.c.c1 == t1.c.c2, uselist=True
)
},
)
cls.mapper_registry.map_imperatively(
C1,
t1,
properties={
"c2s": relationship(
C2, primaryjoin=t1.c.c1 == t2.c.c2, uselist=True
),
"data": relationship(
cls.mapper_registry.map_imperatively(C1Data, t1_data)
),
},
)
def test_cycle(self):
C2, C1, C1Data = (
self.classes.C2,
self.classes.C1,
self.classes.C1Data,
)
a = C1()
b = C2()
c = C1()
d = C2()
e = C2()
f = C2()
a.c2s.append(b)
d.c1s.append(c)
b.c1s.append(c)
a.data.append(C1Data(data="c1data1"))
a.data.append(C1Data(data="c1data2"))
c.data.append(C1Data(data="c1data3"))
sess = fixture_session()
sess.add_all((a, b, c, d, e, f))
sess.flush()
sess.delete(d)
sess.delete(c)
sess.flush()
@testing.combinations(
(
"legacy_style",
True,
),
(
"new_style",
False,
),
argnames="name, _legacy_inactive_history_style",
id_="sa",
)
| BiDirectionalOneToManyTest2 |
python | django__django | django/contrib/auth/migrations/0011_update_proxy_permissions.py | {
"start": 2540,
"end": 2860
} | class ____(migrations.Migration):
dependencies = [
("auth", "0010_alter_group_name_max_length"),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.RunPython(
update_proxy_model_permissions, revert_proxy_model_permissions
),
]
| Migration |
python | celery__celery | t/smoke/tests/test_canvas.py | {
"start": 2902,
"end": 6660
} | class ____:
def test_sanity(self, celery_setup: CeleryTestSetup):
upgraded_chord = signature(
group(
identity.si("header_task1"),
identity.si("header_task2"),
)
| identity.si("body_task"),
queue=celery_setup.worker.worker_queue,
)
sig = group(
[
upgraded_chord,
chord(
group(
identity.si("header_task3"),
identity.si("header_task4"),
),
identity.si("body_task"),
),
chord(
(
sig
for sig in [
identity.si("header_task5"),
identity.si("header_task6"),
]
),
identity.si("body_task"),
),
]
)
res = sig.apply_async(queue=celery_setup.worker.worker_queue)
assert res.get(timeout=RESULT_TIMEOUT) == ["body_task"] * 3
@pytest.mark.parametrize(
"input_body",
[
(lambda queue: add.si(9, 7).set(queue=queue)),
(
lambda queue: chain(
add.si(9, 7).set(queue=queue),
add.si(5, 7).set(queue=queue),
)
),
(
lambda queue: group(
[
add.si(9, 7).set(queue=queue),
add.si(5, 7).set(queue=queue),
]
)
),
(
lambda queue: chord(
group(
[
add.si(1, 1).set(queue=queue),
add.si(2, 2).set(queue=queue),
]
),
add.si(10, 10).set(queue=queue),
)
),
],
ids=[
"body is a single_task",
"body is a chain",
"body is a group",
"body is a chord",
],
)
def test_chord_error_propagation_with_different_body_types(
self, celery_setup: CeleryTestSetup, input_body
) -> None:
"""Reproduce issue #9773 with different chord body types.
This test verifies that the "task_id must not be empty" error is fixed
regardless of the chord body type. The issue occurs when:
1. A chord has a group with both succeeding and failing tasks
2. The chord body can be any signature type (single task, chain, group, chord)
3. When the group task fails, error propagation should work correctly
Args:
input_body (callable): A callable that returns a Celery signature for the chord body.
"""
queue = celery_setup.worker.worker_queue
# Create the failing group header (same for all tests)
failing_group = group(
[
add.si(15, 7).set(queue=queue),
# failing task
fail.si().set(queue=queue),
]
)
# Create the chord
test_chord = chord(failing_group, input_body(queue))
result = test_chord.apply_async()
# The worker should not log the "task_id must not be empty" error
celery_setup.worker.assert_log_does_not_exist(
"ValueError: task_id must not be empty. Got None instead."
)
# The chord should fail with the expected exception from the failing task
with pytest.raises(ExpectedException):
result.get(timeout=RESULT_TIMEOUT)
| test_chord |
python | numba__numba | numba/cuda/stubs.py | {
"start": 1794,
"end": 2097
} | class ____(Dim3):
'''
The shape of a block of threads, as declared when instantiating the kernel.
This value is the same for all threads in a given kernel launch, even if
they belong to different blocks (i.e. each block is "full").
'''
_description_ = '<blockDim.{x,y,z}>'
| blockDim |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/multimethod/package.py | {
"start": 229,
"end": 4294
} | class ____(MultimethodBase):
"""This package is designed for use with Spack's multimethod test.
It has a bunch of test cases for the @when decorator that the
test uses.
"""
homepage = "http://www.example.com/"
url = "http://www.example.com/example-1.0.tar.gz"
version("5.0", md5="0123456789abcdef0123456789abcdef")
version("4.0", md5="0123456789abcdef0123456789abcdef")
version("3.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("mpi", default=False, description="")
depends_on("mpi", when="+mpi")
depends_on("c", type="build")
#
# These functions are only valid for versions 1, 3, and 4.
#
@when("@1.0")
def no_version_2(self):
return 1
@when("@3.0")
def no_version_2(self):
return 3
@when("@4.0")
def no_version_2(self):
return 4
#
# These functions overlap, so there is ambiguity, but we'll take
# the first one.
#
@when("@:4")
def version_overlap(self):
return 1
@when("@2:")
def version_overlap(self):
return 2
#
# More complicated case with cascading versions.
#
def mpi_version(self):
return 0
@when("^mpi@3:")
def mpi_version(self):
return 3
@when("^mpi@2:")
def mpi_version(self):
return 2
@when("^mpi@1:")
def mpi_version(self):
return 1
#
# Use these to test whether the default method is called when no
# match is found. This also tests whether we can switch methods
# on compilers
#
def has_a_default(self):
return "default"
@when("%gcc@10:")
def has_a_default(self):
return "gcc"
@when("%clang")
def has_a_default(self):
return "clang"
#
# Make sure we can switch methods on different target
#
platform = spack.platforms.host()
targets = list(platform.targets.values())
if len(targets) > 1:
targets = targets[:-1]
for target in targets:
@when("target=" + target.name)
def different_by_target(self):
if isinstance(self.spec.architecture.target, str):
return self.spec.architecture.target
else:
return self.spec.architecture.target.name
#
# Make sure we can switch methods on different dependencies
#
@when("^mpich")
def different_by_dep(self):
return "mpich"
@when("^zmpi")
def different_by_dep(self):
return "zmpi"
#
# Make sure we can switch on virtual dependencies
#
def different_by_virtual_dep(self):
return 1
@when("^mpi@2:")
def different_by_virtual_dep(self):
return 2
#
# Make sure methods with a default implementation in a superclass
# will invoke that method when none in the subclass match.
#
@when("@2:")
def base_method(self):
return "multimethod"
#
# Make sure methods with non-default implementations in a superclass
# will invoke those methods when none in the subclass match but one in
# the superclass does.
#
@when("@1.0")
def inherited_and_overridden(self):
return "base@1.0"
@when("@2.0")
def inherited_and_overridden(self):
return "base@2.0"
#
# Make sure that multimethods follow MRO properly with diamond inheritance
#
@when("@2.0")
def diamond_inheritance(self):
return "first_parent"
@when("@4.0")
def diamond_inheritance(self):
return "should_not_be_reached by diamond inheritance test"
#
# Check that multimethods work with boolean values
#
@when(True)
def boolean_true_first(self):
return "True"
@when(False)
def boolean_true_first(self):
return "False"
@when(False)
def boolean_false_first(self):
return "False"
@when(True)
def boolean_false_first(self):
return "True"
| Multimethod |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_pdf.py | {
"start": 105409,
"end": 106801
} | class ____(FigureCanvasBase):
# docstring inherited
fixed_dpi = 72
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, *,
bbox_inches_restore=None, metadata=None):
dpi = self.figure.dpi
self.figure.dpi = 72 # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._ensure_file()
else:
file = PdfFile(filename, metadata=metadata)
try:
file.newPage(width, height)
renderer = MixedModeRenderer(
self.figure, width, height, dpi,
RendererPdf(file, dpi, height, width),
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
if not isinstance(filename, PdfPages):
file.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
def draw(self):
self.figure.draw_without_rendering()
return super().draw()
FigureManagerPdf = FigureManagerBase
@_Backend.export
| FigureCanvasPdf |
python | django__django | tests/template_tests/syntax_tests/test_builtins.py | {
"start": 68,
"end": 628
} | class ____(SimpleTestCase):
@setup({"builtins01": "{{ True }}"})
def test_builtins01(self):
output = self.engine.render_to_string("builtins01")
self.assertEqual(output, "True")
@setup({"builtins02": "{{ False }}"})
def test_builtins02(self):
output = self.engine.render_to_string("builtins02")
self.assertEqual(output, "False")
@setup({"builtins03": "{{ None }}"})
def test_builtins03(self):
output = self.engine.render_to_string("builtins03")
self.assertEqual(output, "None")
| BuiltinsTests |
python | walkccc__LeetCode | solutions/3395. Subsequences with a Unique Middle Mode I/3395-3.py | {
"start": 937,
"end": 2673
} | class ____:
def subsequencesWithMiddleMode(self, nums: list[int]) -> int:
MOD = 1_000_000_007
ans = 0
p = collections.Counter() # prefix counter
s = collections.Counter(nums) # suffix counter
def nC2(n: int) -> int:
return n * (n - 1) // 2
pss = 0
spp = 0
pp = 0
ss = sum(freq**2 for freq in s.values())
ps = 0
for i, a in enumerate(nums):
# Update running sums after decrementing s[a].
pss += p[a] * (-s[a]**2 + (s[a] - 1)**2)
spp += -p[a]**2 # (-s[a] + (s[a] - 1)) * p[a]**2
ss += -s[a]**2 + (s[a] - 1)**2
ps += -p[a] # -p[a] * (-s[a] + (s[a] - 1))
s[a] -= 1
l = i
r = len(nums) - i - 1
# Start with all possible subsequences with `a` as the middle number.
ans += nC2(l) * nC2(r)
# Minus the cases where the frequency of `a` is 1, so it's not a mode.
ans -= nC2(l - p[a]) * nC2(r - s[a])
# Minus the values where `b != a`.
pss_ = pss - p[a] * s[a]**2
spp_ = spp - s[a] * p[a]**2
pp_ = pp - p[a]**2
ss_ = ss - s[a]**2
ps_ = ps - p[a] * s[a]
p_ = l - p[a]
s_ = r - s[a]
# Minus the cases where the `a` is not a "unique" mode or not a mode.
ans -= ps_ * (p[a] * (r - s[a])) + pss_ * (-p[a])
ans -= ps_ * (s[a] * (l - p[a])) + spp_ * (-s[a])
ans -= (pp_ - p_) * s[a] * (r - s[a]) // 2
ans -= (ss_ - s_) * p[a] * (l - p[a]) // 2
ans %= MOD
# Update running sums after incrementing p[a].
pss += s[a]**2 # (-p[a] + (p[a] + 1)) * s[a]**2
spp += s[a] * (-p[a]**2 + (p[a] + 1)**2)
pp += -p[a]**2 + (p[a] + 1)**2
ps += s[a] # (-p[a] + (p[a] + 1)) * s[a]
p[a] += 1
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/util/tf_stack.py | {
"start": 5626,
"end": 6115
} | class ____(_tf_stack.GraphDebugInfoBuilder):
def AppendGraphDebugInfo(self, fn_name, fn_debug_info):
debug_info_str = fn_debug_info.SerializeToString()
super().AppendGraphDebugInfo(fn_name, debug_info_str)
def Build(self):
debug_info_str = super().Build()
debug_info = graph_debug_info_pb2.GraphDebugInfo()
debug_info.ParseFromString(debug_info_str)
return debug_info
StackSummary = _tf_stack.StackTrace
FrameSummary = _tf_stack.StackFrame
| GraphDebugInfoBuilder |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/methods/test_factorize.py | {
"start": 149,
"end": 4468
} | class ____:
def test_factorize(self):
idx1 = DatetimeIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"]
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
# tz must be preserved
idx1 = idx1.tz_localize("Asia/Tokyo")
exp_idx = exp_idx.tz_localize("Asia/Tokyo")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
idx2 = DatetimeIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
def test_factorize_preserves_freq(self):
# GH#38120 freq should be preserved
idx3 = date_range("2000-01", periods=4, freq="ME", tz="Asia/Tokyo")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
assert idx.freq == idx3.freq
arr, idx = factorize(idx3)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
assert idx.freq == idx3.freq
def test_factorize_tz(self, tz_naive_fixture, index_or_series):
tz = tz_naive_fixture
# GH#13750
base = date_range("2016-11-05", freq="h", periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
expected = base._with_freq(None)
tm.assert_index_equal(res, expected)
assert res.freq == expected.freq
def test_factorize_dst(self, index_or_series):
# GH#13750
idx = date_range("2016-11-06", freq="h", periods=12, tz="US/Eastern")
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
if index_or_series is Index:
assert res.freq == idx.freq
idx = date_range("2016-06-13", freq="h", periods=12, tz="US/Eastern")
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
if index_or_series is Index:
assert res.freq == idx.freq
@pytest.mark.parametrize("sort", [True, False])
def test_factorize_no_freq_non_nano(self, tz_naive_fixture, sort):
# GH#51978 case that does not go through the fastpath based on
# non-None freq
tz = tz_naive_fixture
idx = date_range("2016-11-06", freq="h", periods=5, tz=tz)[[0, 4, 1, 3, 2]]
exp_codes, exp_uniques = idx.factorize(sort=sort)
res_codes, res_uniques = idx.as_unit("s").factorize(sort=sort)
tm.assert_numpy_array_equal(res_codes, exp_codes)
tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s"))
res_codes, res_uniques = idx.as_unit("s").to_series().factorize(sort=sort)
tm.assert_numpy_array_equal(res_codes, exp_codes)
tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s"))
| TestDatetimeIndexFactorize |
python | getsentry__sentry | src/sentry/sentry_apps/api/parsers/sentry_app.py | {
"start": 2498,
"end": 9529
} | class ____(Serializer):
name = serializers.CharField(help_text="The name of the custom integration.")
author = serializers.CharField(
required=False, allow_null=True, help_text="The custom integration's author."
)
scopes = ApiScopesField(
allow_null=True, help_text="The custom integration's permission scopes for API access."
)
status = serializers.CharField(
required=False, allow_null=True, help_text="The custom integration's status."
)
events = EventListField(
required=False,
allow_null=True,
help_text="Webhook events the custom integration is subscribed to.",
)
features = serializers.MultipleChoiceField(
choices=Feature.as_choices(),
allow_blank=True,
allow_null=True,
required=False,
help_text="The list of features that the custom integration supports.",
)
schema = SchemaField(
required=False,
allow_null=True,
help_text="The UI components schema, used to render the custom integration's configuration UI elements. See our [schema docs](https://docs.sentry.io/organization/integrations/integration-platform/ui-components/) for more information.",
)
webhookUrl = URLField(
required=False,
allow_null=True,
allow_blank=True,
help_text="The webhook destination URL.",
)
redirectUrl = URLField(
required=False,
allow_null=True,
allow_blank=True,
help_text="The post-installation redirect URL.",
)
isInternal = serializers.BooleanField(
required=False,
default=False,
help_text="Whether or not the integration is internal only. False means the integration is public.",
)
isAlertable = serializers.BooleanField(
required=False,
default=False,
help_text="Marks whether or not the custom integration can be used in an alert rule.",
)
overview = serializers.CharField(
required=False, allow_null=True, help_text="The custom integration's description."
)
verifyInstall = serializers.BooleanField(
required=False,
default=True,
help_text="Whether or not an installation of the custom integration should be verified.",
)
allowedOrigins = serializers.ListField(
child=serializers.CharField(max_length=255),
required=False,
help_text="The list of allowed origins for CORS.",
)
# Bounds chosen to match PositiveSmallIntegerField (https://docs.djangoproject.com/en/3.2/ref/models/fields/#positivesmallintegerfield)
popularity = serializers.IntegerField(
min_value=0,
max_value=32767,
required=False,
allow_null=True,
)
def __init__(self, *args, **kwargs):
self.active_staff = kwargs.pop("active_staff", False)
self.access = kwargs.pop("access", None)
Serializer.__init__(self, *args, **kwargs)
# an abstraction to pull fields from attrs if they are available or the sentry_app if not
def get_current_value_wrapper(self, attrs):
def get_current_value(field_name):
if field_name in attrs:
return attrs[field_name]
# params might be passed as camel case but we always store as snake case
mapped_field_name = camel_to_snake_case(field_name)
if hasattr(self.instance, mapped_field_name):
return getattr(self.instance, mapped_field_name)
else:
return None
return get_current_value
def validate_name(self, value):
max_length = 64 - UUID_CHARS_IN_SLUG - 1 # -1 comes from the - before the UUID bit
if len(value) > max_length:
raise ValidationError("Cannot exceed %d characters" % max_length)
return value
def validate_allowedOrigins(self, value):
for allowed_origin in value:
if "*" in allowed_origin:
raise ValidationError("'*' not allowed in origin")
return value
def validate_scopes(self, value):
if not value:
return value
from sentry.conf.server import SENTRY_TOKEN_ONLY_SCOPES
validation_errors = []
for scope in value:
# if the existing instance already has this scope, skip the check
if self.instance and self.instance.has_scope(scope):
continue
# Token-only scopes can be granted even if the user doesn't have them.
# These are specialized scopes (like project:distribution) that are not
# included in any user role but can be granted to integration tokens.
if scope in SENTRY_TOKEN_ONLY_SCOPES:
continue
assert (
self.access is not None
), "Access is required to validate scopes in SentryAppParser"
# add an error if the requester lacks permissions being requested
if not self.access.has_scope(scope) and not self.active_staff:
validation_errors.append(
"Requested permission of %s exceeds requester's permission. Please contact an administrator to make the requested change."
% (scope)
)
if validation_errors:
raise ValidationError(validation_errors)
return value
def validate(self, attrs):
# validates events against scopes
if attrs.get("scopes"):
for resource in attrs.get("events", []):
needed_scope = REQUIRED_EVENT_PERMISSIONS[resource]
if needed_scope not in attrs["scopes"]:
raise ValidationError(
{"events": f"{resource} webhooks require the {needed_scope} permission."}
)
get_current_value = self.get_current_value_wrapper(attrs)
# validate if webhookUrl is missing that we don't have any webhook features enabled
if not get_current_value("webhookUrl"):
if get_current_value("isInternal"):
# for internal apps, make sure there aren't any events if webhookUrl is null
if get_current_value("events"):
raise ValidationError(
{"webhookUrl": "webhookUrl required if webhook events are enabled"}
)
# also check that we don't have the alert rule enabled
if get_current_value("isAlertable"):
raise ValidationError(
{"webhookUrl": "webhookUrl required if alert rule action is enabled"}
)
else:
raise ValidationError({"webhookUrl": "webhookUrl required for public integrations"})
# validate author for public integrations
if not get_current_value("isInternal") and not get_current_value("author"):
raise ValidationError({"author": "author required for public integrations"})
return attrs
| SentryAppParser |
python | plotly__plotly.py | tests/test_optional/test_figure_factory/test_figure_factory.py | {
"start": 106024,
"end": 116321
} | class ____(NumpyTestUtilsMixin, TestCaseNoTemplate):
def test_data_must_be_dataframe(self):
data = []
pattern = "You must input a pandas DataFrame."
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_facet_grid, data, "a", "b"
)
def test_x_and_y_for_scatter(self):
data = pd.DataFrame([[0, 0], [1, 1]], columns=["a", "b"])
pattern = (
"You need to input 'x' and 'y' if you are you are using a "
"trace_type of 'scatter' or 'scattergl'."
)
self.assertRaisesRegex(PlotlyError, pattern, ff.create_facet_grid, data, "a")
def test_valid_col_selection(self):
data = pd.DataFrame([[0, 0], [1, 1]], columns=["a", "b"])
pattern = (
"x, y, facet_row, facet_col and color_name must be keys in your dataframe."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_facet_grid, data, "a", "c"
)
def test_valid_trace_type(self):
data = pd.DataFrame([[0, 0], [1, 1]], columns=["a", "b"])
self.assertRaises(
PlotlyError, ff.create_facet_grid, data, "a", "b", trace_type="foo"
)
def test_valid_scales(self):
data = pd.DataFrame([[0, 0], [1, 1]], columns=["a", "b"])
pattern = "'scales' must be set to 'fixed', 'free_x', 'free_y' and 'free'."
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_facet_grid,
data,
"a",
"b",
scales="not_free",
)
def test_valid_plotly_color_scale_name(self):
data = pd.DataFrame([[0, 0], [1, 1]], columns=["a", "b"])
self.assertRaises(
PlotlyError,
ff.create_facet_grid,
data,
"a",
"b",
color_name="a",
colormap="wrong one",
)
def test_facet_labels(self):
data = pd.DataFrame([["a1", 0], ["a2", 1]], columns=["a", "b"])
self.assertRaises(
PlotlyError,
ff.create_facet_grid,
data,
"a",
"b",
facet_row="a",
facet_row_labels={},
)
self.assertRaises(
PlotlyError,
ff.create_facet_grid,
data,
"a",
"b",
facet_col="a",
facet_col_labels={},
)
def test_valid_color_dict(self):
data = pd.DataFrame([[0, 0, "foo"], [1, 1, "foo"]], columns=["a", "b", "foo"])
pattern = (
"If using 'colormap' as a dictionary, make sure "
"all the values of the colormap column are in "
"the keys of your dictionary."
)
color_dict = {"bar": "#ffffff"}
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_facet_grid,
data,
"a",
"b",
color_name="a",
colormap=color_dict,
)
def test_valid_colorscale_name(self):
data = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=["a", "b", "c"])
colormap = "foo"
self.assertRaises(
PlotlyError,
ff.create_facet_grid,
data,
"a",
"b",
color_name="c",
colormap=colormap,
)
def test_valid_facet_grid_fig(self):
mpg = [
["audi", "a4", 1.8, 1999, 4, "auto(15)", "f", 18, 29, "p", "compact"],
["audi", "a4", 1.8, 1999, 4, "auto(l5)", "f", 18, 29, "p", "compact"],
["audi", "a4", 2, 2008, 4, "manual(m6)", "f", 20, 31, "p", "compact"],
["audi", "a4", 2, 2008, 4, "auto(av)", "f", 21, 30, "p", "compact"],
["audi", "a4", 2.8, 1999, 6, "auto(l5)", "f", 16, 26, "p", "compact"],
["audi", "a4", 2.8, 1999, 6, "manual(m5)", "f", 18, 26, "p", "compact"],
["audi", "a4", 3.1, 2008, 6, "auto(av)", "f", 18, 27, "p", "compact"],
[
"audi",
"a4 quattro",
1.8,
1999,
4,
"manual(m5)",
"4",
18,
26,
"p",
"compact",
],
[
"audi",
"a4 quattro",
1.8,
1999,
4,
"auto(l5)",
"4",
16,
25,
"p",
"compact",
],
[
"audi",
"a4 quattro",
2,
2008,
4,
"manual(m6)",
"4",
20,
28,
"p",
"compact",
],
]
df = pd.DataFrame(
mpg,
columns=[
"manufacturer",
"model",
"displ",
"year",
"cyl",
"trans",
"drv",
"cty",
"hwy",
"fl",
"class",
],
)
test_facet_grid = ff.create_facet_grid(df, x="displ", y="cty", facet_col="cyl")
exp_facet_grid = {
"data": [
{
"marker": {
"color": "rgb(31, 119, 180)",
"line": {"color": "darkgrey", "width": 1},
"size": 8,
},
"mode": "markers",
"opacity": 0.6,
"type": "scatter",
"x": [1.8, 1.8, 2.0, 2.0, 1.8, 1.8, 2.0],
"xaxis": "x",
"y": [18, 18, 20, 21, 18, 16, 20],
"yaxis": "y",
},
{
"marker": {
"color": "rgb(31, 119, 180)",
"line": {"color": "darkgrey", "width": 1},
"size": 8,
},
"mode": "markers",
"opacity": 0.6,
"type": "scatter",
"x": [2.8, 2.8, 3.1],
"xaxis": "x2",
"y": [16, 18, 18],
"yaxis": "y2",
},
],
"layout": {
"annotations": [
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "4",
"textangle": 0,
"x": 0.24625,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#0f0f0f", "size": 13},
"showarrow": False,
"text": "6",
"textangle": 0,
"x": 0.7537499999999999,
"xanchor": "center",
"xref": "paper",
"y": 1.03,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#000000", "size": 12},
"showarrow": False,
"text": "displ",
"textangle": 0,
"x": 0.5,
"xanchor": "center",
"xref": "paper",
"y": -0.1,
"yanchor": "middle",
"yref": "paper",
},
{
"font": {"color": "#000000", "size": 12},
"showarrow": False,
"text": "cty",
"textangle": -90,
"x": -0.1,
"xanchor": "center",
"xref": "paper",
"y": 0.5,
"yanchor": "middle",
"yref": "paper",
},
],
"height": 600,
"legend": {
"bgcolor": "#efefef",
"borderwidth": 1,
"x": 1.05,
"y": 1,
"yanchor": "top",
},
"paper_bgcolor": "rgb(251, 251, 251)",
"showlegend": False,
"title": {"text": ""},
"width": 600,
"xaxis": {
"anchor": "y",
"domain": [0.0, 0.4925],
"dtick": 0,
"range": [0.85, 4.1575],
"ticklen": 0,
"zeroline": False,
},
"xaxis2": {
"anchor": "y2",
"domain": [0.5075, 1.0],
"dtick": 0,
"range": [0.85, 4.1575],
"ticklen": 0,
"zeroline": False,
},
"yaxis": {
"anchor": "x",
"domain": [0.0, 1.0],
"dtick": 1,
"range": [15.75, 21.2625],
"ticklen": 0,
"zeroline": False,
},
"yaxis2": {
"anchor": "x2",
"domain": [0.0, 1.0],
"dtick": 1,
"matches": "y",
"range": [15.75, 21.2625],
"showticklabels": False,
"ticklen": 0,
"zeroline": False,
},
},
}
for j in [0, 1]:
self.assert_fig_equal(test_facet_grid["data"][j], exp_facet_grid["data"][j])
self.assert_fig_equal(test_facet_grid["layout"], exp_facet_grid["layout"])
| TestFacetGrid |
python | huggingface__transformers | tests/models/siglip/test_modeling_siglip.py | {
"start": 1624,
"end": 2989
} | class ____(ModelTesterMixin):
def test_sdpa_can_dispatch_composite_models(self):
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Load the model with SDPA
model_sdpa = model_class.from_pretrained(tmpdirname)
# Load model with eager attention
model_eager = model_class.from_pretrained(
tmpdirname,
attn_implementation="eager",
)
if hasattr(model_sdpa, "vision_model"):
self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
if hasattr(model_sdpa, "text_model"):
self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.text_model.config._attn_implementation == "eager")
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.config._attn_implementation == "eager")
| SiglipModelTesterMixin |
python | openai__openai-python | src/openai/types/responses/function_shell_tool_param.py | {
"start": 222,
"end": 367
} | class ____(TypedDict, total=False):
type: Required[Literal["shell"]]
"""The type of the shell tool. Always `shell`."""
| FunctionShellToolParam |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 14927,
"end": 17200
} | class ____(AggregateFnV2[SupportsRichComparisonType, SupportsRichComparisonType]):
"""Defines min aggregation.
Example:
.. testcode::
import ray
from ray.data.aggregate import Min
ds = ray.data.range(100)
# Schema: {'id': int64}
ds = ds.add_column("group_key", lambda x: x % 3)
# Schema: {'id': int64, 'group_key': int64}
# Finding the minimum value per group:
result = ds.groupby("group_key").aggregate(Min(on="id")).take_all()
# result: [{'group_key': 0, 'min(id)': 0},
# {'group_key': 1, 'min(id)': 1},
# {'group_key': 2, 'min(id)': 2}]
Args:
on: The name of the column to find the minimum value from. Must be provided.
ignore_nulls: Whether to ignore null values. If `True` (default), nulls are
skipped. If `False`, the minimum will be null if any value in
the group is null (for most data types, or follow type-specific
comparison rules with nulls).
alias_name: Optional name for the resulting column.
zero_factory: A callable that returns the initial "zero" value for the
accumulator. For example, for a float column, this would be
`lambda: float("+inf")`. Default is `lambda: float("+inf")`.
"""
def __init__(
self,
on: Optional[str] = None,
ignore_nulls: bool = True,
alias_name: Optional[str] = None,
zero_factory: Callable[[], SupportsRichComparisonType] = lambda: float("+inf"),
):
super().__init__(
alias_name if alias_name else f"min({str(on)})",
on=on,
ignore_nulls=ignore_nulls,
zero_factory=zero_factory,
)
def aggregate_block(self, block: Block) -> SupportsRichComparisonType:
return BlockAccessor.for_block(block).min(
self._target_col_name, self._ignore_nulls
)
def combine(
self,
current_accumulator: SupportsRichComparisonType,
new: SupportsRichComparisonType,
) -> SupportsRichComparisonType:
return min(current_accumulator, new)
@PublicAPI
| Min |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/compression.py | {
"start": 571,
"end": 729
} | class ____:
name: str
modified_at: datetime
mode: int
method: CompressionType
data: BinaryIO
size: int = 0
crc32: int = 0
| MemberFile |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 33063,
"end": 33348
} | class ____(TestSetSubclassWithSlots):
thetype = FrozenSetSubclassWithSlots
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
| TestFrozenSetSubclassWithSlots |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/settings.py | {
"start": 1367,
"end": 1609
} | class ____(PrefectBaseSettings):
model_config = build_settings_config(("integrations", "aws"))
ecs: EcsSettings = Field(
description="Settings for controlling ECS behavior.",
default_factory=EcsSettings,
)
| AwsSettings |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 19579,
"end": 20274
} | class ____(IntEnum):
"""represent preferences for the 'SQL linting' feature.
this feature currently includes support for flagging cartesian products
in SQL statements.
"""
NO_LINTING = 0
"Disable all linting."
COLLECT_CARTESIAN_PRODUCTS = 1
"""Collect data on FROMs and cartesian products and gather into
'self.from_linter'"""
WARN_LINTING = 2
"Emit warnings for linters that find problems"
FROM_LINTING = COLLECT_CARTESIAN_PRODUCTS | WARN_LINTING
"""Warn for cartesian products; combines COLLECT_CARTESIAN_PRODUCTS
and WARN_LINTING"""
NO_LINTING, COLLECT_CARTESIAN_PRODUCTS, WARN_LINTING, FROM_LINTING = tuple(
Linting
)
| Linting |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 34030,
"end": 35820
} | class ____(
AssertsCompiledSQL, fixtures.DeclarativeMappedTest
):
"""test #6762"""
__dialect__ = "default"
run_create_tables = None
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Content(Base):
__tablename__ = "content"
id = Column(Integer, primary_key=True)
type = Column(String)
container_id = Column(Integer, ForeignKey("folder.id"))
__mapper_args__ = {"polymorphic_on": type}
class Folder(Content):
__tablename__ = "folder"
id = Column(ForeignKey("content.id"), primary_key=True)
__mapper_args__ = {
"polymorphic_identity": "f",
"inherit_condition": id == Content.id,
}
_alias = aliased(Content)
Content.__mapper__.add_property(
"count_children",
column_property(
select(func.count("*"))
.where(_alias.container_id == Content.id)
.scalar_subquery()
),
)
def test_alias_omitted(self):
Content = self.classes.Content
Folder = self.classes.Folder
sess = fixture_session()
entity = with_polymorphic(Content, [Folder], innerjoin=True)
self.assert_compile(
sess.query(entity),
"SELECT content.id AS content_id, content.type AS content_type, "
"content.container_id AS content_container_id, "
"(SELECT count(:count_2) AS count_1 FROM content AS content_1 "
"WHERE content_1.container_id = content.id) AS anon_1, "
"folder.id AS folder_id FROM content "
"JOIN folder ON folder.id = content.id",
)
| ColPropWAliasJoinedToBaseTest |
python | huggingface__transformers | tests/models/xlnet/test_tokenization_xlnet.py | {
"start": 967,
"end": 2707
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "xlnet/xlnet-base-cased"
tokenizer_class = XLNetTokenizer
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁false', '.', '▁', '生活的真谛是', '▁Hi', '▁', 'Hello', '▁Hi', '▁', 'Hello', '▁', 'Hello', '<s>', '▁', 'hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁', 'Hello', '.', '▁But', '▁', 'ir', 'd', '▁and', '▁', 'ป', '▁', 'ir', 'd', '▁', 'ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [122, 27, 24, 934, 17, 0, 35, 30, 1094, 25, 664, 7701, 19, 21, 52, 27, 4417, 9, 17, 0, 4036, 17, 11368, 4036, 17, 11368, 17, 11368, 1, 17, 2582, 1, 105, 32, 405, 4905, 170, 39, 4183, 23147, 60, 17, 11368, 9, 130, 17, 1121, 66, 21, 17, 0, 17, 1121, 66, 17, 0, 14239, 160, 41, 44, 690] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '<unk>', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁false', '.', '▁', '<unk>', '▁Hi', '▁', 'Hello', '▁Hi', '▁', 'Hello', '▁', 'Hello', '<s>', '▁', 'hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁', 'Hello', '.', '▁But', '▁', 'ir', 'd', '▁and', '▁', '<unk>', '▁', 'ir', 'd', '▁', '<unk>', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test <unk> I was born in 92000, and this is false. <unk> Hi Hello Hi Hello Hello<s> hi<s> there The following string should be properly encoded: Hello. But ird and <unk> ird <unk> Hey how are you doing"
| XLNetTokenizationTest |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 4845,
"end": 5986
} | class ____:
param_names = ["dtype"]
params = ["float32", "float64", "datetime", "object"]
def setup(self, dtype):
N = 10**5
# with datetimes (GH7555)
if dtype == "datetime":
values = date_range("1/1/2011", periods=N, freq="s")
elif dtype == "object":
values = ["foo"] * N
else:
values = np.arange(N).astype(dtype)
key = np.arange(N)
self.df = DataFrame({"key": key, "values": values})
self.df.iloc[1, 1] = np.nan # insert missing data
def time_frame_nth_any(self, dtype):
self.df.groupby("key").nth(0, dropna="any")
def time_groupby_nth_all(self, dtype):
self.df.groupby("key").nth(0, dropna="all")
def time_frame_nth(self, dtype):
self.df.groupby("key").nth(0)
def time_series_nth_any(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="any")
def time_series_nth_all(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0, dropna="all")
def time_series_nth(self, dtype):
self.df["values"].groupby(self.df["key"]).nth(0)
| Nth |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/relationships/comprehensions.py | {
"start": 135,
"end": 242
} | class ____:
"""A component class."""
def __init__(self, name: str):
self.name = name
| Component |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 1112,
"end": 6302
} | class ____(unittest.TestCase):
def setUp(self):
self.weight_globs_digits = [
"model.layers.*.mlp.gate_up_proj.weight",
"model.layers.*.self_attn.q_proj.weight",
"embed_tokens.weight",
]
self.alt_digits, self.map_digits, _ = build_glob_alternation(self.weight_globs_digits)
self.weight_globs_any = [
"model.layers.*.mlp.gate_up_proj.weight",
"model.layers.*.self_attn.q_proj.weight",
"embed_tokens.weight",
]
self.alt_any, self.map_any, _ = build_glob_alternation(self.weight_globs_any)
@staticmethod
def _match_glob(key, alt, mapping):
matched = alt.search(key)
return mapping.get(matched.lastgroup) if matched else None
def test_exact_match(self):
self.assertEqual(
self._match_glob("embed_tokens.weight", self.alt_digits, self.map_digits), "embed_tokens.weight"
)
def test_digits_only_star_accepts_digits(self):
self.assertEqual(
self._match_glob("model.layers.0.mlp.gate_up_proj.weight", self.alt_digits, self.map_digits),
"model.layers.*.mlp.gate_up_proj.weight",
)
self.assertEqual(
self._match_glob("model.layers.12.self_attn.q_proj.weight", self.alt_digits, self.map_digits),
"model.layers.*.self_attn.q_proj.weight",
)
def test_anychar_star_accepts_nondigits(self):
self.assertEqual(
self._match_glob("model.layers.a.mlp.gate_up_proj.weight", self.alt_any, self.map_any),
"model.layers.*.mlp.gate_up_proj.weight",
)
self.assertEqual(
self._match_glob("model.layers.00x.mlp.gate_up_proj.weight", self.alt_any, self.map_any),
"model.layers.*.mlp.gate_up_proj.weight",
)
def test_no_match(self):
self.assertIsNone(self._match_glob("model.layers.0.mlp.up_proj.weight", self.alt_digits, self.map_digits))
def test_leftmost_alternative_wins_for_overlapping_patterns(self):
# Overlapping patterns: both could match; ensure leftmost wins
globs = [
"model.layers.*.mlp.*.weight", # broader (first)
"model.layers.0.mlp.gate_up_proj.weight", # more specific (second)
]
alt, mapping, _ = build_glob_alternation(globs)
# Both branches match; Python's regex picks the leftmost alternative → index 0
self.assertEqual(
self._match_glob("model.layers.0.mlp.gate_up_proj.weight", alt, mapping), "model.layers.*.mlp.*.weight"
)
def test_multiple_patterns_same_prefix(self):
globs = [
"model.layers.*.self_attn.q_proj.weight",
"model.layers.*.self_attn.k_proj.weight",
"model.layers.*.self_attn.v_proj.weight",
]
alt, mapping, _ = build_glob_alternation(
globs,
)
self.assertEqual(
self._match_glob("model.layers.3.self_attn.q_proj.weight", alt, mapping),
"model.layers.*.self_attn.q_proj.weight",
)
self.assertEqual(
self._match_glob("model.layers.3.self_attn.k_proj.weight", alt, mapping),
"model.layers.*.self_attn.k_proj.weight",
)
self.assertEqual(
self._match_glob("model.layers.3.self_attn.v_proj.weight", alt, mapping),
"model.layers.*.self_attn.v_proj.weight",
)
def test_anchor_full_match_only(self):
self.assertIsNotNone(
self._match_glob("model.layers.0.mlp.gate_up_proj.weight.bar", self.alt_any, self.map_any)
)
def test_large_batch_performance_smoke(self):
# Not a perf benchmark, but ensures building and matching a larger alternation is OK
globs = [f"model.layers.*.mlp.block{i}.weight" for i in range(200)]
alt, mapping, _ = build_glob_alternation(globs)
key = "model.layers.123.mlp.block57.weight"
self.assertEqual(self._match_glob(key, alt, mapping), "model.layers.*.mlp.block57.weight")
def test_sub_key_rewrites_targets(self):
renamings = [
WeightRenaming("block_sparse_moe.experts.*.w1.weight", "mlp.experts.gate_up_proj"),
WeightRenaming("block_sparse_moe.experts.*.w2.weight", "mlp.experts.down_proj"),
WeightRenaming("model.language_model.*", "language_model"),
]
self.assertEqual(
rename_source_key("foo.block_sparse_moe.experts.3.w1.weight", renamings, [])[0],
"foo.mlp.experts.gate_up_proj",
)
self.assertEqual(
rename_source_key("foo.block_sparse_moe.experts.3.w2.weight", renamings, [])[0],
"foo.mlp.experts.down_proj",
)
self.assertEqual(rename_source_key("model.language_model.lm_head.weight", renamings, [])[0], "language_model")
def test_sub_key_no_match_returns_original(self):
renamings = [
WeightRenaming("block_sparse_moe.experts.*.w1.weight", "*.mlp.experts.gate_up_proj"),
]
key = "unrelated.key"
renamed_key, _ = rename_source_key(key, renamings, [])
self.assertEqual(renamed_key, key)
| TestWeightGlobMatching |
python | celery__celery | t/unit/tasks/test_stamping.py | {
"start": 15106,
"end": 23880
} | class ____:
def setup_method(self):
@self.app.task(shared=False)
def identity(x):
return x
self.identity = identity
@self.app.task(shared=False)
def fail(*args):
args = ("Task expected to fail",) + args
raise Exception(*args)
self.fail = fail
@self.app.task(shared=False)
def add(x, y):
return x + y
self.add = add
@self.app.task(shared=False)
def mul(x, y):
return x * y
self.mul = mul
@self.app.task(shared=False)
def div(x, y):
return x / y
self.div = div
@self.app.task(shared=False)
def xsum(numbers):
return sum(sum(num) if isinstance(num, Iterable) else num for num in numbers)
self.xsum = xsum
@self.app.task(shared=False, bind=True)
def replaced(self, x, y):
return self.replace(add.si(x, y))
self.replaced = replaced
@self.app.task(shared=False, bind=True)
def replaced_group(self, x, y):
return self.replace(group(add.si(x, y), mul.si(x, y)))
self.replaced_group = replaced_group
@self.app.task(shared=False, bind=True)
def replace_with_group(self, x, y):
return self.replace(group(add.si(x, y), mul.si(x, y)))
self.replace_with_group = replace_with_group
@self.app.task(shared=False, bind=True)
def replace_with_chain(self, x, y):
return self.replace(group(add.si(x, y) | mul.s(y), add.si(x, y)))
self.replace_with_chain = replace_with_chain
@self.app.task(shared=False)
def xprod(numbers):
try:
return math.prod(numbers)
except AttributeError:
# TODO: Drop this backport once
# we drop support for Python 3.7
import operator
from functools import reduce
return reduce(operator.mul, numbers)
self.xprod = xprod
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
self.iterations += 1
rmax = self.max_retries if max_retries is None else max_retries
assert repr(self.request)
retries = self.request.retries
if care and retries >= rmax:
return arg1
else:
raise self.retry(countdown=0, max_retries=rmax)
self.retry_task = retry_task
@pytest.mark.parametrize(
"stamping_visitor",
[
BooleanStampingVisitor(),
ListStampingVisitor(),
SetStampingVisitor(),
StringStampingVisitor(),
UUIDStampingVisitor(),
],
)
@pytest.mark.parametrize(
"canvas_workflow",
[
signature("sig"),
group(signature("sig")),
group(signature("sig1", signature("sig2"))),
group(signature(f"sig{i}") for i in range(2)),
chord((signature(f"sig{i}") for i in range(2)), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), signature("sig3") | signature("sig4")),
chord(signature("sig1"), signature("sig2") | signature("sig3")),
chain(
signature("sig"),
chord((signature(f"sig{i}") for i in range(2)), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), signature("sig3") | signature("sig4")),
chord(signature("sig1"), signature("sig2") | signature("sig3")),
),
chain(
signature("sig1") | signature("sig2"),
group(signature("sig3"), signature("sig4")) | group(signature(f"sig{i}") for i in range(5, 6)),
chord(group(signature(f"sig{i}") for i in range(6, 8)), signature("sig8")) | signature("sig9"),
),
chain(
signature("sig"),
chord(
group(signature(f"sig{i}") for i in range(2)),
chain(
signature("sig3"),
chord(
(signature(f"sig{i}") for i in range(4, 6)),
chain(
signature("sig6"),
chord(
group(signature(f"sig{i}") for i in range(7, 9)),
chain(
signature("sig9"),
chord(group(signature("sig10"), signature("sig11")), signature("sig12")),
),
),
),
),
),
),
),
group(
signature("sig"),
group(signature("sig1")),
group(signature("sig1"), signature("sig2")),
group(signature(f"sig{i}") for i in range(2)),
group([signature("sig1"), signature("sig2")]),
group((signature("sig1"), signature("sig2"))),
chain(signature("sig1"), signature("sig2")),
chord(group(signature("sig1"), signature("sig2")), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), group(signature("sig3"), signature("sig4"))),
chain(
group(signature("sig1"), signature("sig2")),
group(signature("sig3"), signature("sig4")),
signature("sig5"),
),
chain(
signature("sig1"),
group(signature("sig2"), signature("sig3")),
group(signature("sig4"), signature("sig5")),
),
chain(
group(
signature("sig1"),
group(signature("sig2")),
group([signature("sig3"), signature("sig4")]),
group(signature(f"sig{i}") for i in range(5, 7)),
),
chain(
signature("sig8"),
group(signature("sig9"), signature("sig10")),
),
),
),
chain(
signature("sig"),
group(signature("sig1")),
group(signature("sig1"), signature("sig2")),
group(signature(f"sig{i}") for i in range(2)),
group([signature("sig1"), signature("sig2")]),
group((signature("sig1"), signature("sig2"))),
chain(signature("sig1"), signature("sig2")),
chord(group(signature("sig1"), signature("sig2")), signature("sig3")),
chord(group(signature(f"sig{i}") for i in range(2)), group(signature("sig3"), signature("sig4"))),
chain(
group(signature("sig1"), signature("sig2")),
group(signature("sig3"), signature("sig4")),
signature("sig5"),
),
chain(
signature("sig1"),
group(signature("sig2"), signature("sig3")),
group(signature("sig4"), signature("sig5")),
),
chain(
group(
signature("sig1"),
group(signature("sig2")),
group([signature("sig3"), signature("sig4")]),
group(signature(f"sig{i}") for i in range(5, 7)),
),
chain(
signature("sig8"),
group(signature("sig9"), signature("sig10")),
),
),
),
chord(
group(
group(signature(f"sig{i}") for i in range(2)),
group(signature(f"sig{i}") for i in range(2, 4)),
group(signature(f"sig{i}") for i in range(4, 6)),
group(signature(f"sig{i}") for i in range(6, 8)),
),
chain(
chain(
signature("sig8") | signature("sig9"),
group(signature("sig10"), signature("sig11"))
| group(signature(f"sig{i}") for i in range(12, 14)),
chord(group(signature(f"sig{i}") for i in range(14, 16)), signature("sig16"))
| signature("sig17"),
),
signature("sig1") | signature("sig2"),
group(signature("sig3"), signature("sig4")) | group(signature(f"sig{i}") for i in range(5, 7)),
chord(group(signature(f"sig{i}") for i in range(7, 9)), signature("sig9")) | signature("sig10"),
),
),
],
)
| CanvasCase |
python | google__pytype | pytype/overlays/fiddle_overlay.py | {
"start": 8985,
"end": 9789
} | class ____(abstract.Instance, mixin.HasSlots):
"""Base class for Config and Partial instances."""
def __init__(self, fiddle_type_name, cls, ctx, container=None):
super().__init__(cls, ctx, container)
self.fiddle_type_name = fiddle_type_name
self.underlying = None
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__getitem__", self.getitem_slot)
def getitem_slot(self, node, slice_var) -> tuple[Node, abstract.Instance]:
# We need to set this here otherwise we walk up the chain and call
# getitem_slot on BuildableBuilder, which tries to create an
# AnnotationContainer.
# TODO(mdemello): This probably needs to delegate to
# vm_utils._call_binop_on_bindings with the lhs set to self.underlying.
return node, self.ctx.new_unsolvable(node)
| Buildable |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 14676,
"end": 15242
} | class ____(BaseSafeMigrationTest, ColExistsMixin):
app = "good_flow_delete_field_pending_with_fk_constraint_app"
migrate_from = "0001"
migrate_to = "0003"
def test(self) -> None:
self._run_migration(self.app, "0001_initial")
assert self.col_exists("fk_table_id")
self._run_migration(self.app, "0002_remove_constraints_and_pending")
assert self.col_exists("fk_table_id")
self._run_migration(self.app, "0003_delete")
assert not self.col_exists("fk_table_id")
| DeletionFieldGoodDeletePendingWithFKConstraint |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassKwOnly1.py | {
"start": 748,
"end": 819
} | class ____(DC3):
c: float
DC4("", 0.2, b=3)
DC4(a="", b=3, c=0.2)
| DC4 |
python | falconry__falcon | tests/test_before_hooks.py | {
"start": 3181,
"end": 4132
} | class ____:
_some_fish = Fish()
# Test non-callable should be skipped by decorator
on_patch = {} # type: ignore
@falcon.before(validate_param, 'limit')
def on_get(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
@falcon.before(validate_param, 'limit')
def on_head(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
@falcon.before(_some_fish)
def on_post(self, req, resp, fish, bunnies):
self._capture(req, resp, bunnies)
self.fish = fish
@falcon.before(_some_fish.hook)
def on_put(self, req, resp, fish, bunnies):
self._capture(req, resp, bunnies)
self.fish = fish
def _capture(self, req, resp, bunnies):
self.req = req
self.resp = resp
self.bunnies = bunnies
# NOTE(swistakm): we use both type of hooks (class and method)
# at once for the sake of simplicity
@falcon.before(bunnies)
| WrappedClassResource |
python | tornadoweb__tornado | tornado/websocket.py | {
"start": 3580,
"end": 23028
} | class ____(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echos back all received messages
back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from JavaScript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Both settings are in seconds; floating point values are allowed.
The default timeout is equal to the interval.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(
self,
application: tornado.web.Application,
request: httputil.HTTPServerRequest,
**kwargs: Any,
) -> None:
super().__init__(application, request, **kwargs)
self.ws_connection = None # type: Optional[WebSocketProtocol]
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self._on_close_called = False
async def get(self, *args: Any, **kwargs: Any) -> None:
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != "websocket":
self.set_status(400)
log_msg = 'Can "Upgrade" only to "WebSocket".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(
lambda s: s.strip().lower(), headers.get("Connection", "").split(",")
)
if "upgrade" not in connection:
self.set_status(400)
log_msg = '"Connection" must be "Upgrade".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
await self.ws_connection.accept_connection(self)
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
@property
def ping_interval(self) -> Optional[float]:
"""The interval for sending websocket pings.
If this is non-zero, the websocket will send a ping every
ping_interval seconds.
The client will respond with a "pong". The connection can be configured
to timeout on late pong delivery using ``websocket_ping_timeout``.
Set ``websocket_ping_interval = 0`` to disable pings.
Default: ``0``
"""
return self.settings.get("websocket_ping_interval", None)
@property
def ping_timeout(self) -> Optional[float]:
"""Timeout if no pong is received in this many seconds.
To be used in combination with ``websocket_ping_interval > 0``.
If a ping response (a "pong") is not received within
``websocket_ping_timeout`` seconds, then the websocket connection
will be closed.
This can help to clean up clients which have disconnected without
cleanly closing the websocket connection.
Note, the ping timeout cannot be longer than the ping interval.
Set ``websocket_ping_timeout = 0`` to disable the ping timeout.
Default: equal to the ``ping_interval``.
.. versionchanged:: 6.5.0
Default changed from the max of 3 pings or 30 seconds.
The ping timeout can no longer be configured longer than the
ping interval.
"""
return self.settings.get("websocket_ping_timeout", None)
@property
def max_message_size(self) -> int:
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get(
"websocket_max_message_size", _default_max_message_size
)
def write_message(
self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
"""
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]:
"""Override to implement subprotocol negotiation.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol.
Failure to select a subprotocol does not automatically abort
the connection, although clients may close the connection if
none of their proposed subprotocols was selected.
The list may be empty, in which case this method must return
None. This method is always called exactly once even if no
subprotocols were proposed so that the handler can be advised
of this fact.
.. versionchanged:: 5.1
Previously, this method was called with a list containing
an empty string instead of an empty list if no subprotocols
were proposed by the client.
"""
return None
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol returned by `select_subprotocol`.
.. versionadded:: 5.1
"""
assert self.ws_connection is not None
return self.ws_connection.selected_subprotocol
def get_compression_options(self) -> Optional[Dict[str, Any]]:
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
These parameters are documented in detail here:
https://docs.python.org/3.13/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
def _open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
pass
open = _open # type: Callable[..., Optional[Awaitable[None]]]
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
`open` may be a coroutine. `on_message` will not be called until
`open` has returned.
.. versionchanged:: 5.1
``open`` may be a coroutine.
"""
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data: Union[str, bytes] = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data: bytes) -> None:
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data: bytes) -> None:
"""Invoked when the a ping frame is received."""
pass
def on_close(self) -> None:
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin: str) -> bool:
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return ``True`` to accept the request or ``False`` to
reject it. By default, rejects all requests with an origin on
a host other than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value: bool) -> None:
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
assert self.ws_connection is not None
self.ws_connection.set_nodelay(value)
def on_connection_close(self) -> None:
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def on_ws_connection_close(
self, close_code: Optional[int] = None, close_reason: Optional[str] = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _break_cycles(self) -> None:
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super()._break_cycles()
def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]:
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
params = _WebSocketParams(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout,
max_message_size=self.max_message_size,
compression_options=self.get_compression_options(),
)
return WebSocketProtocol13(self, False, params)
return None
def _detach_stream(self) -> IOStream:
# disable non-WS methods
for method in [
"write",
"redirect",
"set_header",
"set_cookie",
"set_status",
"flush",
"finish",
]:
setattr(self, method, _raise_not_supported_for_websockets)
return self.detach()
def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None:
raise RuntimeError("Method not supported for Web Sockets")
| WebSocketHandler |
python | pypa__warehouse | tests/unit/macaroons/test_caveats.py | {
"start": 7933,
"end": 9739
} | class ____:
def test_verify_no_identity(self):
caveat = RequestUser(user_id="invalid")
result = caveat.verify(
pretend.stub(identity=None), pretend.stub(), pretend.stub()
)
assert result == Failure("token with user restriction without a user")
def test_verify_invalid_identity_no_user(self):
caveat = RequestUser(user_id="invalid")
result = caveat.verify(
pretend.stub(identity=pretend.stub()), pretend.stub(), pretend.stub()
)
assert result == Failure("token with user restriction without a user")
def test_verify_invalid_identity_no_macaroon(self, db_request):
user = UserFactory.create()
user_context = UserContext(user, None)
caveat = RequestUser(user_id=str(user.id))
result = caveat.verify(
pretend.stub(identity=user_context), pretend.stub(), pretend.stub()
)
assert result == Failure("token with user restriction without a macaroon")
def test_verify_invalid_user_id(self, db_request):
user = UserFactory.create()
user_context = UserContext(user, pretend.stub())
caveat = RequestUser(user_id="invalid")
result = caveat.verify(
pretend.stub(identity=user_context), pretend.stub(), pretend.stub()
)
assert result == Failure(
"current user does not match user restriction in token"
)
def test_verify_ok(self, db_request):
user = UserFactory.create()
user_context = UserContext(user, pretend.stub())
caveat = RequestUser(user_id=str(user.id))
result = caveat.verify(
pretend.stub(identity=user_context), pretend.stub(), pretend.stub()
)
assert result == Success()
| TestRequestUserCaveat |
python | PyCQA__pylint | doc/data/messages/i/invalid-match-args-definition/bad.py | {
"start": 0,
"end": 176
} | class ____:
__match_args__ = ["title", "year"] # [invalid-match-args-definition]
def __init__(self, title, year):
self.title = title
self.year = year
| Book |
python | getsentry__sentry | tests/sentry/tasks/test_clear_expired_resolutions.py | {
"start": 432,
"end": 3021
} | class ____(TestCase):
def test_task_persistent_name(self) -> None:
assert clear_expired_resolutions.name == "sentry.tasks.clear_expired_resolutions"
def test_simple(self) -> None:
project = self.create_project()
old_release = Release.objects.create(organization_id=project.organization_id, version="a")
old_release.add_project(project)
group1 = self.create_group(
project=project, status=GroupStatus.RESOLVED, active_at=timezone.now()
)
resolution1 = GroupResolution.objects.create(
group=group1, release=old_release, type=GroupResolution.Type.in_next_release
)
activity1 = Activity.objects.create(
group=group1,
project=project,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
ident=resolution1.id,
data={"version": ""},
)
new_release = Release.objects.create(
organization_id=project.organization_id,
version="b",
date_added=timezone.now() + timedelta(minutes=1),
)
new_release.add_project(project)
group2 = self.create_group(status=GroupStatus.UNRESOLVED, active_at=timezone.now())
resolution2 = GroupResolution.objects.create(
group=group2, release=new_release, type=GroupResolution.Type.in_next_release
)
activity2 = Activity.objects.create(
group=group2,
project=project,
type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
ident=resolution2.id,
data={"version": ""},
)
clear_expired_resolutions(new_release.id)
assert Group.objects.get(id=group1.id).status == GroupStatus.RESOLVED
assert Group.objects.get(id=group2.id).status == GroupStatus.UNRESOLVED
# row should be updated to the in_release type, and reflect
# the release it was resolved in
resolution1 = GroupResolution.objects.get(id=resolution1.id)
assert resolution1.status == GroupResolution.Status.resolved
assert resolution1.release == new_release
assert resolution1.type == GroupResolution.Type.in_release
resolution2 = GroupResolution.objects.get(id=resolution2.id)
assert resolution2.status == GroupResolution.Status.pending
activity1 = Activity.objects.get(id=activity1.id)
assert activity1.data["version"] == new_release.version
activity2 = Activity.objects.get(id=activity2.id)
assert activity2.data["version"] == ""
| ClearExpiredResolutionsTest |
python | html5lib__html5lib-python | html5lib/tests/tokenizer.py | {
"start": 6713,
"end": 7698
} | class ____(pytest.Collector):
def __init__(self, name, parent=None, config=None, session=None, testdata=None):
super(TokenizerTestCollector, self).__init__(name, parent, config, session)
if 'initialStates' not in testdata:
testdata["initialStates"] = ["Data state"]
if 'doubleEscaped' in testdata:
testdata = unescape(testdata)
self.testdata = testdata
def collect(self):
for initialState in self.testdata["initialStates"]:
initialState = capitalize(initialState)
item = TokenizerTest.from_parent(self,
name=initialState,
test=self.testdata,
initialState=initialState)
if self.testdata["input"] is None:
item.add_marker(pytest.mark.skipif(True, reason="Relies on lone surrogates"))
yield item
| TokenizerTestCollector |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 40473,
"end": 41699
} | class ____(util.MdCase):
"""Test custom Arithmatex preview format."""
extension = ['pymdownx.superfences']
extension_configs = {
'pymdownx.superfences': {
'custom_fences': [
{
'name': 'math',
'class': 'arithmatex',
'format': arithmatex.arithmatex_fenced_format(mode='mathjax', preview=True)
}
]
}
}
def test_arithmatex_preview(self):
"""Test Arithmatex formatter with preview."""
self.check_markdown(
r'''
```math
E(\mathbf{v}, \mathbf{h}) = -\sum_{i,j}w_{ij}v_i h_j - \sum_i b_i v_i - \sum_j c_j h_j
```
''',
r'''
<div class="arithmatex">
<div class="MathJax_Preview">
E(\mathbf{v}, \mathbf{h}) = -\sum_{i,j}w_{ij}v_i h_j - \sum_i b_i v_i - \sum_j c_j h_j
</div>
<script type="math/tex; mode=display">
E(\mathbf{v}, \mathbf{h}) = -\sum_{i,j}w_{ij}v_i h_j - \sum_i b_i v_i - \sum_j c_j h_j
</script>
</div>
''',
True
)
| TestSuperFencesCustomArithmatexPreview |
python | modin-project__modin | asv_bench/benchmarks/io/csv.py | {
"start": 2275,
"end": 2721
} | class ____(BaseReadCsv):
data_type = "true_false_int"
param_names = ["shape"]
params = [get_benchmark_shapes("TimeReadCsvTrueFalseValues")]
def time_true_false_values(self, test_filenames, shape):
execute(
IMPL.read_csv(
test_filenames[self.shape_id],
true_values=["Yes", "true"],
false_values=["No", "false"],
),
)
| TimeReadCsvTrueFalseValues |
python | getsentry__sentry | tests/sentry/discover/test_dashboard_widget_split.py | {
"start": 914,
"end": 24674
} | class ____(BaseMetricsLayerTestCase, TestCase, SnubaTestCase):
@property
def now(self) -> datetime:
return before_now(minutes=10)
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization()
with assume_test_silo_mode_of(User):
self.user = User.objects.create(email="test@sentry.io")
self.project_2 = self.create_project(organization=self.org)
self.project_3 = self.create_project(organization=self.org)
self.project_ids = [
self.project.id,
self.project_2.id,
self.project_3.id,
]
self.projects = [
self.project,
self.project_2,
self.project_3,
]
self.query = {"fields": ["test"], "conditions": [], "limit": 10}
self.nine_mins_ago = before_now(minutes=9)
self.ten_mins_ago = before_now(minutes=10)
self.dry_run = False
self.dashboard = Dashboard.objects.create(
title="Dashboard With Split Widgets",
created_by_id=self.user.id,
organization=self.organization,
)
self.dashboard.projects.set([self.project, self.project_2])
def test_errors_widget(self) -> None:
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
errors_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=["title", "issue", "project", "release", "count()", "count_unique(user)"],
columns=[],
aggregates=["count_unique(user)"],
conditions="stack.filename:'../../sentry/scripts/views.js'",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(errors_widget_query, self.dry_run)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
def test_metrics_compatible_query(self) -> None:
metrics_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
metrics_query = DashboardWidgetQuery.objects.create(
widget=metrics_widget,
fields=["transaction", "count()"],
columns=[],
aggregates=[],
conditions="",
order=0,
)
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
project_id=self.project.id,
tags={"transaction": "/sentry/scripts/views.js"},
value=30,
org_id=self.dashboard.organization.id,
hours_before_now=2,
)
with self.feature({"organizations:dynamic-sampling": True}):
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
metrics_query, self.dry_run
)
metrics_widget.refresh_from_db()
assert (
metrics_widget.discover_widget_split is None
if self.dry_run
else metrics_widget.discover_widget_split == 101
)
assert queried_snuba
def test_metrics_compatible_query_no_data(self) -> None:
metrics_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
metrics_query = DashboardWidgetQuery.objects.create(
widget=metrics_widget,
fields=["transaction", "count()"],
columns=[],
aggregates=[],
conditions="transaction:'/sentry/scripts/views.js'",
order=0,
)
with self.feature({"organizations:dynamic-sampling": True}):
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
metrics_query, self.dry_run
)
metrics_widget.refresh_from_db()
assert (
metrics_widget.discover_widget_split is None
if self.dry_run
else metrics_widget.discover_widget_split == 100
)
assert queried_snuba
def test_metrics_compatible_query_no_data_only_aggregates(self) -> None:
metrics_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
# When only aggregates are requested, the response has a row but it's
# completely empty.
metrics_query = DashboardWidgetQuery.objects.create(
widget=metrics_widget,
fields=["count()", "count_unique(user)"],
columns=[],
aggregates=[],
conditions=f"project:[{self.project_2.slug}]",
order=0,
)
with self.feature({"organizations:dynamic-sampling": True}):
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
metrics_query, self.dry_run
)
metrics_widget.refresh_from_db()
assert (
metrics_widget.discover_widget_split is None
if self.dry_run
else metrics_widget.discover_widget_split == 100
)
assert queried_snuba
def test_metrics_query_with_no_dynamic_sampling(self) -> None:
metrics_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
metrics_query = DashboardWidgetQuery.objects.create(
widget=metrics_widget,
fields=["transaction", "count()"],
columns=[],
aggregates=[],
conditions="transaction:'/sentry/scripts/views.js'",
order=0,
)
with self.feature({"organizations:dynamic-sampling": False}):
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
metrics_query, self.dry_run
)
metrics_widget.refresh_from_db()
assert (
metrics_widget.discover_widget_split is None
if self.dry_run
else metrics_widget.discover_widget_split == 100
)
assert queried_snuba
def test_ambiguous_widget_with_error_data(self) -> None:
data = load_data("javascript", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
data = load_data("javascript", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/2"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
errors_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=["title", "org_slug", "project", "release", "count()", "count_unique(user)"],
columns=[],
aggregates=["count_unique(user)"],
conditions="",
order=0,
)
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
errors_widget_query, self.dry_run
)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
assert queried_snuba
def test_ambiguous_widget_with_transactions_data(self) -> None:
data = load_data("transaction", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
data = load_data("transaction", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/2"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
errors_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=["title", "org_slug", "project", "release", "count()", "count_unique(user)"],
columns=[],
aggregates=["count_unique(user)"],
conditions="",
order=0,
)
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
errors_widget_query, self.dry_run
)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 101
)
assert queried_snuba
def test_alias_with_user_misery_widget(self) -> None:
data = load_data("transaction", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
data = load_data("transaction", timestamp=self.ten_mins_ago)
data["transaction"] = "/to_other/2"
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
user_misery_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="user misery",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
user_misery_widget_query = DashboardWidgetQuery.objects.create(
widget=user_misery_widget,
fields=["title", "user_misery(300)"],
columns=[],
aggregates=["user_misery(300)"],
conditions="",
order=0,
)
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
user_misery_widget_query, self.dry_run
)
user_misery_widget.refresh_from_db()
assert not queried_snuba
assert (
user_misery_widget.discover_widget_split is None
if self.dry_run
else user_misery_widget.discover_widget_split == 101
)
def test_alias_with_last_seen_widget(self) -> None:
data = load_data("python", timestamp=self.ten_mins_ago)
self.store_event(data, project_id=self.project.id, assert_no_errors=False)
last_seen_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="last seen",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
last_seen_widget_query = DashboardWidgetQuery.objects.create(
widget=last_seen_widget,
fields=["title", "last_seen()"],
columns=[],
aggregates=["last_seen()"],
conditions="",
order=0,
)
_, queried_snuba = _get_and_save_split_decision_for_dashboard_widget(
last_seen_widget_query, self.dry_run
)
last_seen_widget.refresh_from_db()
assert not queried_snuba
assert (
last_seen_widget.discover_widget_split is None
if self.dry_run
else last_seen_widget.discover_widget_split == 100
)
@freeze_time("2024-05-01 12:00:00")
def test_out_of_range_defaults_to_seven_days(self) -> None:
dashboard = Dashboard.objects.create(
title="test 2",
created_by_id=self.user.id,
organization=self.organization,
filters={"start": "2024-01-01T10:00:00", "end": "2024-01-02T10:00:00"},
)
dashboard.projects.set([self.project, self.project_2])
error_widget = DashboardWidget.objects.create(
dashboard=dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
with self.options({"system.event-retention-days": 90}):
snuba_dataclass = _get_snuba_dataclass_for_dashboard_widget(error_widget, self.projects)
assert snuba_dataclass.start == datetime(2024, 4, 24, 12, 0, tzinfo=timezone.utc)
assert snuba_dataclass.end == datetime(2024, 5, 1, 12, 0, tzinfo=timezone.utc)
@freeze_time("2024-05-01 12:00:00")
def test_respects_range_date_and_environment_params(self) -> None:
environment = self.environment
dashboard = Dashboard.objects.create(
title="test 3",
created_by_id=self.user.id,
organization=self.organization,
filters={"period": "1h", "environment": [environment.name]},
)
dashboard.projects.set([self.project, self.project_2])
error_widget = DashboardWidget.objects.create(
dashboard=dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
snuba_dataclass = _get_snuba_dataclass_for_dashboard_widget(error_widget, self.projects)
assert snuba_dataclass.start == datetime(2024, 5, 1, 11, 0, tzinfo=timezone.utc)
assert snuba_dataclass.end == datetime(2024, 5, 1, 12, 0, tzinfo=timezone.utc)
assert snuba_dataclass.environments == [environment]
def test_errors_widget_unhandled_in_conditions(self) -> None:
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
errors_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=["title", "issue", "project", "release", "count()", "count_unique(user)"],
columns=[],
aggregates=["count_unique(user)"],
conditions="(error.unhandled:true message:testing) OR message:test",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(errors_widget_query, self.dry_run)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
if not self.dry_run:
assert error_widget.dataset_source == DashboardDatasetSourcesTypes.FORCED.value
def test_dashboard_projects_empty(self) -> None:
# Dashboard belonging to an org with no projects
self.organization = self.create_organization()
self.dashboard = Dashboard.objects.create(
title="Dashboard With Split Widgets",
created_by_id=self.user.id,
organization=self.organization,
)
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
errors_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=["title", "issue", "project", "release", "count()", "count_unique(user)"],
columns=[],
aggregates=["count_unique(user)"],
conditions="(error.unhandled:true message:testing) OR message:test",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(errors_widget_query, self.dry_run)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
if not self.dry_run:
assert error_widget.dataset_source == DashboardDatasetSourcesTypes.FORCED.value
def test_dashboard_split_equation_without_aggregates(self) -> None:
transaction_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="transaction widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
transaction_widget_query = DashboardWidgetQuery.objects.create(
widget=transaction_widget,
fields=[
"equation|count_if(blah-key-set,equals,True) / count()",
],
columns=[],
aggregates=[
"equation|count_if(blah-key-set,equals,True) / count()",
],
conditions="event.type:transaction transaction:foo",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(transaction_widget_query, self.dry_run)
transaction_widget.refresh_from_db()
assert (
transaction_widget.discover_widget_split is None
if self.dry_run
else transaction_widget.discover_widget_split == 101
)
if not self.dry_run:
assert (
transaction_widget.dataset_source
== DashboardDatasetSourcesTypes.SPLIT_VERSION_2.value
)
def test_dashboard_split_transaction_status_error_events_dataset(self) -> None:
transaction_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="transaction widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
transaction_widget_query = DashboardWidgetQuery.objects.create(
widget=transaction_widget,
fields=["transaction", "p75(transaction.duration)", "total.count"],
columns=["transaction"],
aggregates=["p75(transaction.duration)", "total.count"],
conditions="event.type:transaction transaction.status:ok",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(transaction_widget_query, self.dry_run)
transaction_widget.refresh_from_db()
assert (
transaction_widget.discover_widget_split is None
if self.dry_run
else transaction_widget.discover_widget_split == 101
)
if not self.dry_run:
assert transaction_widget.dataset_source == DashboardDatasetSourcesTypes.FORCED.value
def test_unhandled_filter_sets_error_events_dataset(self) -> None:
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
error_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=[
"equation|count() / total.count * 100",
"release",
"error_event",
"count()",
"total.count",
],
columns=["release"],
aggregates=["equation|count() / total.count * 100", "count()", "total.count"],
conditions="error.unhandled:false",
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(error_widget_query, self.dry_run)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
if not self.dry_run:
assert error_widget.dataset_source == DashboardDatasetSourcesTypes.FORCED.value
def test_empty_equation_is_filtered_out(self) -> None:
error_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
order=0,
title="error widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2}},
)
error_widget_query = DashboardWidgetQuery.objects.create(
widget=error_widget,
fields=[
"count()",
"equation|",
],
columns=[],
aggregates=["count()", "equation|"],
conditions='message:"Testing"',
order=0,
)
_get_and_save_split_decision_for_dashboard_widget(error_widget_query, self.dry_run)
error_widget.refresh_from_db()
assert (
error_widget.discover_widget_split is None
if self.dry_run
else error_widget.discover_widget_split == 100
)
if not self.dry_run:
assert error_widget.dataset_source == DashboardDatasetSourcesTypes.SPLIT_VERSION_2.value
| DashboardWidgetDatasetSplitTestCase |
python | milvus-io__pymilvus | tests/test_grpc_handler_mutations.py | {
"start": 19312,
"end": 21765
} | class ____:
def test_get_info(self) -> None:
handler = GrpcHandler(channel=None)
# Test with schema provided
schema = {
"fields": [
{"name": "id", "type": DataType.INT64},
{"name": "vector", "type": DataType.FLOAT_VECTOR}
],
"enable_dynamic_field": True
}
fields_info, enable_dynamic = handler._get_info("test_collection", schema=schema)
assert fields_info == schema["fields"]
assert enable_dynamic is True
def test_get_info_without_schema(self) -> None:
handler = GrpcHandler(channel=None)
with patch.object(handler, 'describe_collection') as mock_describe:
mock_describe.return_value = {
"fields": [{"name": "id", "type": DataType.INT64}],
"enable_dynamic_field": False
}
fields_info, enable_dynamic = handler._get_info("test_collection")
assert fields_info == [{"name": "id", "type": DataType.INT64}]
assert enable_dynamic is False
def test_get_schema_from_cache_or_remote_cached(self) -> None:
handler = GrpcHandler(channel=None)
# Add to cache
cached_schema = {
"fields": [{"name": "id", "type": DataType.INT64}],
"update_timestamp": 100
}
handler.schema_cache["test_collection"] = {
"schema": cached_schema,
"schema_timestamp": 100
}
schema, timestamp = handler._get_schema_from_cache_or_remote("test_collection")
assert schema == cached_schema
assert timestamp == 100
def test_get_schema_from_cache_or_remote_not_cached(self) -> None:
handler = GrpcHandler(channel=None)
with patch.object(handler, 'describe_collection') as mock_describe:
remote_schema = {
"fields": [{"name": "id", "type": DataType.INT64}],
"update_timestamp": 200
}
mock_describe.return_value = remote_schema
schema, timestamp = handler._get_schema_from_cache_or_remote("test_collection")
assert schema == remote_schema
assert timestamp == 200
# Check it was cached
assert handler.schema_cache["test_collection"]["schema"] == remote_schema
assert handler.schema_cache["test_collection"]["schema_timestamp"] == 200
| TestGrpcHandlerHelperMethods |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 675671,
"end": 676059
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("IssueComment", graphql_name="node")
"""The item at the end of the edge."""
| IssueCommentEdge |
python | mlflow__mlflow | mlflow/gateway/providers/gemini.py | {
"start": 1020,
"end": 20510
} | class ____(ProviderAdapter):
@classmethod
def chat_to_model(cls, payload, config):
# Documentation: https://ai.google.dev/api/generate-content
# Example payload for the chat API.
#
# {
# "contents": [
# {
# "role": "user",
# "parts": [
# {
# "text": "System: You are an advanced AI Assistant"
# }
# ]
# },
# {
# "role": "user",
# "parts": [
# {
# "text": "Please give the code for addition of two numbers"
# }
# ]
# }
# ],
# "generationConfig": {
# "temperature": 0.1,
# "topP": 1,
# "stopSequences": ["\n"],
# "candidateCount": 1,
# "maxOutputTokens": 100,
# "topK": 40
# }
# }
for k1, k2 in GENERATION_CONFIG_KEY_MAPPING.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
if "top_p" in payload and payload["top_p"] > 1:
raise AIGatewayException(
status_code=422, detail="top_p should be less than or equal to 1"
)
payload = rename_payload_keys(payload, GENERATION_CONFIG_KEY_MAPPING)
contents = []
system_message = None
call_id_to_function_name_map = {}
for message in payload["messages"]:
role = message["role"]
if role in ("user", "assistant"):
if role == "assistant":
role = "model"
gemini_function_calls = []
if role == "model":
if tool_calls := message.get("tool_calls"):
for tool_call in tool_calls:
call_id_to_function_name_map[tool_call["id"]] = tool_call["function"][
"name"
]
gemini_function_calls.append(
{
"functionCall": {
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"args": json.loads(tool_call["function"]["arguments"]),
}
}
)
if gemini_function_calls:
contents.append({"role": "model", "parts": gemini_function_calls})
else:
contents.append({"role": role, "parts": [{"text": message["content"]}]})
elif role == "system":
if system_message is None:
system_message = {"parts": []}
system_message["parts"].append({"text": message["content"]})
elif role == "tool":
call_id = message["tool_call_id"]
contents.append(
{
"role": "user",
"parts": [
{
"functionResponse": {
"id": call_id,
# the function name field is required by Gemini request format
"name": call_id_to_function_name_map[call_id],
"response": json.loads(message["content"]),
}
}
],
}
)
gemini_payload = {"contents": contents}
if system_message:
gemini_payload["system_instruction"] = system_message
if generation_config := {k: v for k, v in payload.items() if k in GENERATION_CONFIGS}:
gemini_payload["generationConfig"] = generation_config
# convert tool definition to Gemini format
if tools := payload.pop("tools", None):
function_declarations = []
for tool in tools:
if tool["type"] != "function":
raise AIGatewayException(
status_code=422,
detail=(
"Only function calling tool is supported, but received tool type "
f"{tool['type']}"
),
)
tool_function = tool["function"]
function_declarations.append(
{
"name": tool_function["name"],
"description": tool_function["description"],
"parametersJsonSchema": tool_function["parameters"],
}
)
gemini_payload["tools"] = [{"functionDeclarations": function_declarations}]
return gemini_payload
@classmethod
def _convert_function_call_to_openai_choice(
cls,
content_parts: list[dict[str, Any]],
finish_reason: str,
choice_idx: int,
stream: bool,
):
# convert gemini model responded "function call" struct to Openai choice / choice chunk
# struct.
# Gemini doc: https://ai.google.dev/api/caching#FunctionCall
tool_calls = []
for part in content_parts:
function_call = part["functionCall"]
func_name = function_call["name"]
func_arguments = json.dumps(function_call["args"])
call_id = function_call.get("id")
if call_id is None:
# Gemini model response might not contain function call id,
# in order to make it compatible with Openai chat protocol,
# we need to generate a unique call id.
call_id = (
"call_"
+ hashlib.md5(
f"{func_name}/{func_arguments}".encode(),
usedforsecurity=False,
).hexdigest()
)
if stream:
tool_calls.append(
chat_schema.ToolCallDelta(
index=0,
id=call_id,
function=Function(
name=func_name,
arguments=func_arguments,
),
type="function",
)
)
else:
tool_calls.append(
ToolCall(
id=call_id,
function=Function(
name=func_name,
arguments=func_arguments,
),
type="function",
)
)
if stream:
return chat_schema.StreamChoice(
index=choice_idx,
delta=chat_schema.StreamDelta(
role="assistant",
tool_calls=tool_calls,
),
finish_reason=finish_reason,
)
return chat_schema.Choice(
index=choice_idx,
message=chat_schema.ResponseMessage(
role="assistant",
tool_calls=tool_calls,
),
finish_reason=finish_reason,
)
@classmethod
def model_to_chat(cls, resp, config):
# Documentation: https://ai.google.dev/api/generate-content
#
# Example Response:
# {
# "candidates": [
# {
# "content": {
# "parts": [
# {
# "text": "Blue is often seen as a calming and soothing color."
# }
# ]
# },
# "finishReason": "stop"
# }
# ],
# "usageMetadata": {
# "promptTokenCount": 10,
# "candidatesTokenCount": 10,
# "totalTokenCount": 20
# }
# }
choices = []
for idx, candidate in enumerate(resp.get("candidates", [])):
finish_reason = candidate.get("finishReason", "stop")
if finish_reason == "MAX_TOKENS":
finish_reason = "length"
if parts := candidate.get("content", {}).get("parts", None):
if parts[0].get("functionCall", None):
choices.append(
GeminiAdapter._convert_function_call_to_openai_choice(
parts, finish_reason, idx, False
)
)
elif content := parts[0].get("text"):
choices.append(
chat_schema.Choice(
index=idx,
message=chat_schema.ResponseMessage(
role="assistant",
content=content,
),
finish_reason=finish_reason,
)
)
usage_metadata = resp.get("usageMetadata", {})
return chat_schema.ResponsePayload(
id=f"gemini-chat-{int(time.time())}",
created=int(time.time()),
object="chat.completion",
model=config.model.name,
choices=choices,
usage=chat_schema.ChatUsage(
prompt_tokens=usage_metadata.get("promptTokenCount", None),
completion_tokens=usage_metadata.get("candidatesTokenCount", None),
total_tokens=usage_metadata.get("totalTokenCount", None),
),
)
@classmethod
def model_to_chat_streaming(
cls, resp: dict[str, Any], config
) -> chat_schema.StreamResponsePayload:
# Documentation: https://ai.google.dev/api/generate-content#method:-models.streamgeneratecontent
#
# Example Streaming Chunk:
# {
# "candidates": [
# {
# "content": {
# "parts": [
# {
# "text": "Blue is often seen as a calming and soothing color."
# }
# ]
# },
# "finishReason": null
# }
# ],
# "id": "stream-id",
# "object": "chat.completion.chunk",
# "created": 1234567890,
# "model": "gemini-2.0-flash"
# }
choices = []
for idx, cand in enumerate(resp.get("candidates", [])):
parts = cand.get("content", {}).get("parts", [])
finish_reason = cand.get("finishReason")
if parts:
if parts[0].get("functionCall"):
# for gemini model streaming response,
# the function call message is not split into chunks
# it still contains the full function call arguments data.
choices.append(
GeminiAdapter._convert_function_call_to_openai_choice(
parts, finish_reason, idx, True
)
)
continue
delta_text = parts[0].get("text", "") if parts else ""
choices.append(
chat_schema.StreamChoice(
index=idx,
finish_reason=finish_reason,
delta=chat_schema.StreamDelta(
role="assistant",
content=delta_text,
),
)
)
current_time = int(time.time())
return chat_schema.StreamResponsePayload(
id=f"gemini-chat-stream-{current_time}",
object="chat.completion.chunk",
created=current_time,
model=config.model.name,
choices=choices,
)
@classmethod
def completions_to_model(cls, payload, config):
# Documentation: https://ai.google.dev/api/generate-content
# Example payload for the completions API.
#
# {
# "prompt": "What is the world record for flapjack consumption in a single sitting?",
# "temperature": 0.1,
# "topP": 1,
# "stop": ["\n"],
# "n": 1,
# "max_tokens": 100,
# "top_k": 40,
# }
chat_payload = {"messages": [{"role": "user", "content": payload.pop("prompt")}], **payload}
if system_message := payload.pop("system_prompt", None):
chat_payload["messages"].insert(0, {"role": "system", "content": system_message})
return cls.chat_to_model(chat_payload, config)
@classmethod
def model_to_completions(cls, resp, config):
# Documentation: https://ai.google.dev/api/generate-content
#
# Example Response:
# {
# "candidates": [
# {
# "content": {
# "parts": [
# {
# "text": "Blue is often seen as a calming and soothing color."
# }
# ]
# },
# "finishReason": "stop"
# }
# ],
# "usageMetadata": {
# "promptTokenCount": 10,
# "candidatesTokenCount": 10,
# "totalTokenCount": 20
# }
# }
choices = []
for idx, candidate in enumerate(resp.get("candidates", [])):
text = ""
if parts := candidate.get("content", {}).get("parts", None):
text = parts[0].get("text", None)
if not text:
continue
finish_reason = candidate.get("finishReason", "stop")
if finish_reason == "MAX_TOKENS":
finish_reason = "length"
choices.append(
completions_schema.Choice(
index=idx,
text=text,
finish_reason=finish_reason,
)
)
usage_metadata = resp.get("usageMetadata", {})
return completions_schema.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=config.model.name,
choices=choices,
usage=completions_schema.CompletionsUsage(
prompt_tokens=usage_metadata.get("promptTokenCount", None),
completion_tokens=usage_metadata.get("candidatesTokenCount", None),
total_tokens=usage_metadata.get("totalTokenCount", None),
),
)
@classmethod
def model_to_completions_streaming(
cls, resp: dict[str, Any], config
) -> completions_schema.StreamResponsePayload:
# Documentation: https://ai.google.dev/api/generate-content#method:-models.streamgeneratecontent
# Example SSE chunk for streaming completions:
# {
# "candidates": [
# {
# "content": {
# "parts": [
# { "text": "Hello, world!" }
# ]
# },
# "finishReason": "stop"
# }
# ],
# "id": "gemini-completions-stream-1234567890",
# "object": "text_completion.chunk",
# "created": 1234567890,
# "model": "gemini-2.0-flash"
# }
choices = []
for idx, cand in enumerate(resp.get("candidates", [])):
parts = cand.get("content", {}).get("parts", [])
delta_text = parts[0].get("text", "") if parts else ""
choices.append(
completions_schema.StreamChoice(
index=idx,
finish_reason=cand.get("finishReason"),
text=delta_text,
)
)
current_time = int(time.time())
return completions_schema.StreamResponsePayload(
id=f"gemini-completions-stream-{current_time}",
object="text_completion.chunk",
created=current_time,
model=config.model.name,
choices=choices,
)
@classmethod
def embeddings_to_model(cls, payload, config):
# Example payload for the embedding API.
# Documentation: https://ai.google.dev/api/embeddings#v1beta.ContentEmbedding
#
# {
# "requests": [
# {
# "model": "models/text-embedding-004",
# "content": {
# "parts": [
# {
# "text": "What is the meaning of life?"
# }
# ]
# }
# },
# {
# "model": "models/text-embedding-004",
# "content": {
# "parts": [
# {
# "text": "How much wood would a woodchuck chuck?"
# }
# ]
# }
# },
# {
# "model": "models/text-embedding-004",
# "content": {
# "parts": [
# {
# "text": "How does the brain work?"
# }
# ]
# }
# }
# ]
# }
texts = payload["input"]
if isinstance(texts, str):
texts = [texts]
return (
{"content": {"parts": [{"text": texts[0]}]}}
if len(texts) == 1
else {
"requests": [
{"model": f"models/{config.model.name}", "content": {"parts": [{"text": text}]}}
for text in texts
]
}
)
@classmethod
def model_to_embeddings(cls, resp, config):
# Documentation: https://ai.google.dev/api/embeddings#v1beta.ContentEmbedding
#
# Example Response:
# {
# "embeddings": [
# {
# "values": [
# 3.25,
# 0.7685547,
# 2.65625,
# ...,
# -0.30126953,
# -2.3554688,
# 1.2597656
# ]
# }
# ]
# }
data = [
embeddings_schema.EmbeddingObject(embedding=item.get("values", []), index=i)
for i, item in enumerate(resp.get("embeddings") or [resp.get("embedding", {})])
]
# Create and return response payload directly
return embeddings_schema.ResponsePayload(
data=data,
model=config.model.name,
usage=embeddings_schema.EmbeddingsUsage(
prompt_tokens=None,
total_tokens=None,
),
)
| GeminiAdapter |
python | getsentry__sentry | tests/sentry/issues/test_utils.py | {
"start": 3601,
"end": 4310
} | class ____:
def build_statuschange_data(self, **overrides: Any) -> StatusChangeMessageData:
kwargs: StatusChangeMessageData = {
"id": uuid.uuid4().hex,
"project_id": 1,
"fingerprint": ["some-fingerprint"],
"new_status": 1,
"new_substatus": 1,
"detector_id": None,
"activity_data": {"test": "test"},
}
kwargs.update(overrides) # type: ignore[typeddict-item]
process_occurrence_data(kwargs)
return kwargs
def build_statuschange(self, **overrides: Any) -> StatusChangeMessage:
return StatusChangeMessage(**self.build_statuschange_data(**overrides))
| StatusChangeTestMixin |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 1643,
"end": 1794
} | class ____(DagsterError):
"""Indicates that an invalid value was returned from a source asset observation function."""
| DagsterInvalidObservationError |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_function_base.py | {
"start": 21574,
"end": 22432
} | class ____(TestCase):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [
np.int8,
np.uint8,
np.int16,
np.int32,
np.float32,
np.float64,
np.complex64,
np.complex128,
]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
assert_array_equal(np.cumsum(a, axis=0), tgt)
tgt = np.array([[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
assert_array_equal(np.cumsum(a2, axis=0), tgt)
tgt = np.array([[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
assert_array_equal(np.cumsum(a2, axis=1), tgt)
| TestCumsum |
python | sympy__sympy | sympy/physics/quantum/cartesian.py | {
"start": 6399,
"end": 6750
} | class ____(Bra, PositionState3D): # type: ignore
""" 3D cartesian position eigenbra """
@classmethod
def dual_class(self):
return PositionKet3D
#-------------------------------------------------------------------------
# Momentum eigenstates
#-------------------------------------------------------------------------
| PositionBra3D |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform3.py | {
"start": 1239,
"end": 1311
} | class ____(Customer1):
salary: float = model_field()
| Customer1Subclass |
python | ray-project__ray | python/ray/serve/tests/unit/test_user_callable_wrapper.py | {
"start": 647,
"end": 15847
} | class ____:
def __call__(self, suffix: Optional[str] = None, raise_exception: bool = False):
if raise_exception:
raise RuntimeError("uh-oh!")
return "hi" + (suffix if suffix is not None else "")
async def call_async(
self, suffix: Optional[str] = None, raise_exception: bool = False
):
if raise_exception:
raise RuntimeError("uh-oh!")
return "hi" + (suffix if suffix is not None else "")
def call_generator(
self, n: int, raise_exception: bool = False
) -> Generator[int, None, None]:
for i in range(n):
yield i
if raise_exception:
raise RuntimeError("uh-oh!")
async def call_async_generator(
self, n: int, raise_exception: bool = False
) -> AsyncGenerator[int, None]:
for i in range(n):
yield i
if raise_exception:
raise RuntimeError("uh-oh!")
def basic_sync_function(suffix: Optional[str] = None, raise_exception: bool = False):
if raise_exception:
raise RuntimeError("uh-oh!")
return "hi" + (suffix if suffix is not None else "")
async def basic_async_function(
suffix: Optional[str] = None, raise_exception: bool = False
):
if raise_exception:
raise RuntimeError("uh-oh!")
return "hi" + (suffix if suffix is not None else "")
def basic_sync_generator(n: int, raise_exception: bool = False):
for i in range(n):
yield i
if raise_exception:
raise RuntimeError("uh-oh!")
async def basic_async_generator(n: int, raise_exception: bool = False):
for i in range(n):
yield i
if raise_exception:
raise RuntimeError("uh-oh!")
def _make_user_callable_wrapper(
callable: Optional[Callable] = None,
*,
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[str, Any]] = None,
run_sync_methods_in_threadpool: bool = False,
run_user_code_in_separate_thread: bool = True,
) -> UserCallableWrapper:
return UserCallableWrapper(
callable if callable is not None else BasicClass,
init_args or tuple(),
init_kwargs or dict(),
deployment_id=DeploymentID(name="test_name"),
run_sync_methods_in_threadpool=run_sync_methods_in_threadpool,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
local_testing_mode=False,
deployment_config=DeploymentConfig(max_ongoing_requests=100),
)
def _make_request_metadata(
*,
call_method: Optional[str] = None,
is_http_request: bool = False,
is_grpc_request: bool = False,
is_streaming: bool = False,
) -> RequestMetadata:
protocol = RequestProtocol.UNDEFINED
if is_http_request:
protocol = RequestProtocol.HTTP
if is_grpc_request:
protocol = RequestProtocol.GRPC
return RequestMetadata(
request_id="test_request",
internal_request_id="test_internal_request",
call_method=call_method if call_method is not None else "__call__",
_request_protocol=protocol,
is_streaming=is_streaming,
)
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.asyncio
async def test_calling_initialize_twice(run_user_code_in_separate_thread: bool):
user_callable_wrapper = _make_user_callable_wrapper(
run_user_code_in_separate_thread=run_user_code_in_separate_thread
)
await user_callable_wrapper.initialize_callable()
assert isinstance(user_callable_wrapper.user_callable, BasicClass)
with pytest.raises(RuntimeError):
await user_callable_wrapper.initialize_callable()
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.asyncio
async def test_calling_methods_before_initialize(
run_user_code_in_separate_thread: bool,
):
user_callable_wrapper = _make_user_callable_wrapper(
run_user_code_in_separate_thread=run_user_code_in_separate_thread
)
with pytest.raises(RuntimeError):
await user_callable_wrapper.call_user_method(None, tuple(), dict())
with pytest.raises(RuntimeError):
await user_callable_wrapper.call_user_health_check()
with pytest.raises(RuntimeError):
await user_callable_wrapper.call_reconfigure(None, rank=0)
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True])
@pytest.mark.asyncio
async def test_basic_class_callable(
run_user_code_in_separate_thread: bool, run_sync_methods_in_threadpool: bool
):
user_callable_wrapper = _make_user_callable_wrapper(
run_sync_methods_in_threadpool=run_sync_methods_in_threadpool,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
# Call non-generator method with is_streaming.
request_metadata = _make_request_metadata(is_streaming=True)
with pytest.raises(TypeError, match="did not return a generator."):
async for _ in user_callable_wrapper.call_user_generator(
request_metadata, tuple(), dict()
):
pass
# Test calling default sync `__call__` method.
request_metadata = _make_request_metadata()
assert (
await user_callable_wrapper.call_user_method(request_metadata, tuple(), dict())
) == "hi"
assert (
await user_callable_wrapper.call_user_method(
request_metadata, ("-arg",), dict()
)
== "hi-arg"
)
assert (
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"suffix": "-kwarg"}
)
== "hi-kwarg"
)
with pytest.raises(RuntimeError, match="uh-oh"):
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"raise_exception": True}
)
# Call non-generator async method with is_streaming.
request_metadata = _make_request_metadata(
call_method="call_async", is_streaming=True
)
with pytest.raises(TypeError, match="did not return a generator."):
async for _ in user_callable_wrapper.call_user_generator(
request_metadata, tuple(), dict()
):
pass
# Test calling `call_async` method.
request_metadata = _make_request_metadata(call_method="call_async")
assert (
await user_callable_wrapper.call_user_method(request_metadata, tuple(), dict())
== "hi"
)
assert (
await user_callable_wrapper.call_user_method(
request_metadata, ("-arg",), dict()
)
== "hi-arg"
)
assert (
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"suffix": "-kwarg"}
)
== "hi-kwarg"
)
with pytest.raises(RuntimeError, match="uh-oh"):
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"raise_exception": True}
)
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True])
@pytest.mark.asyncio
async def test_basic_class_callable_generators(
run_sync_methods_in_threadpool: bool, run_user_code_in_separate_thread: bool
):
user_callable_wrapper = _make_user_callable_wrapper(
run_sync_methods_in_threadpool=run_sync_methods_in_threadpool,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
result_list = []
# Call sync generator without is_streaming.
request_metadata = _make_request_metadata(
call_method="call_generator", is_streaming=False
)
with pytest.raises(
TypeError, match="Method 'call_generator' returned a generator."
):
await user_callable_wrapper.call_user_method(
request_metadata,
(10,),
dict(),
)
# Call sync generator.
request_metadata = _make_request_metadata(
call_method="call_generator", is_streaming=True
)
async for result in user_callable_wrapper.call_user_generator(
request_metadata, (10,), dict()
):
result_list.append(result)
assert result_list == list(range(10))
result_list.clear()
# Call sync generator raising exception.
with pytest.raises(RuntimeError, match="uh-oh"):
async for result in user_callable_wrapper.call_user_generator(
request_metadata,
(10,),
{"raise_exception": True},
):
result_list.append(result)
assert result_list == [0]
result_list.clear()
# Call async generator without is_streaming.
request_metadata = _make_request_metadata(
call_method="call_async_generator", is_streaming=False
)
with pytest.raises(
TypeError, match="Method 'call_async_generator' returned a generator."
):
await user_callable_wrapper.call_user_method(
request_metadata,
(10,),
dict(),
)
# Call async generator.
request_metadata = _make_request_metadata(
call_method="call_async_generator", is_streaming=True
)
async for result in user_callable_wrapper.call_user_generator(
request_metadata, (10,), dict()
):
result_list.append(result)
assert result_list == list(range(10))
result_list.clear()
# Call async generator raising exception.
with pytest.raises(RuntimeError, match="uh-oh"):
async for result in user_callable_wrapper.call_user_generator(
request_metadata,
(10,),
{"raise_exception": True},
):
result_list.append(result)
assert result_list == [0]
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True])
@pytest.mark.parametrize("fn", [basic_sync_function, basic_async_function])
@pytest.mark.asyncio
async def test_basic_function_callable(
fn: Callable,
run_sync_methods_in_threadpool: bool,
run_user_code_in_separate_thread: bool,
):
user_callable_wrapper = _make_user_callable_wrapper(
fn,
run_sync_methods_in_threadpool=run_sync_methods_in_threadpool,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
# Call non-generator function with is_streaming.
request_metadata = _make_request_metadata(is_streaming=True)
with pytest.raises(TypeError, match="did not return a generator."):
async for _ in user_callable_wrapper.call_user_generator(
request_metadata, tuple(), dict()
):
pass
request_metadata = _make_request_metadata()
assert (
await user_callable_wrapper.call_user_method(request_metadata, tuple(), dict())
) == "hi"
assert (
await user_callable_wrapper.call_user_method(
request_metadata, ("-arg",), dict()
)
) == "hi-arg"
assert (
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"suffix": "-kwarg"}
)
) == "hi-kwarg"
with pytest.raises(RuntimeError, match="uh-oh"):
await user_callable_wrapper.call_user_method(
request_metadata, tuple(), {"raise_exception": True}
)
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.parametrize("run_sync_methods_in_threadpool", [False, True])
@pytest.mark.parametrize("fn", [basic_sync_generator, basic_async_generator])
@pytest.mark.asyncio
async def test_basic_function_callable_generators(
fn: Callable,
run_sync_methods_in_threadpool: bool,
run_user_code_in_separate_thread: bool,
):
user_callable_wrapper = _make_user_callable_wrapper(
fn,
run_sync_methods_in_threadpool=run_sync_methods_in_threadpool,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
result_list = []
# Call generator function without is_streaming.
request_metadata = _make_request_metadata(is_streaming=False)
with pytest.raises(
TypeError, match=f"Method '{fn.__name__}' returned a generator."
):
await user_callable_wrapper.call_user_method(
request_metadata,
(10,),
dict(),
)
# Call generator function.
request_metadata = _make_request_metadata(
call_method="call_generator", is_streaming=True
)
async for result in user_callable_wrapper.call_user_generator(
request_metadata, (10,), dict()
):
result_list.append(result)
assert result_list == list(range(10))
result_list.clear()
# Call generator function raising exception.
with pytest.raises(RuntimeError, match="uh-oh"):
async for result in user_callable_wrapper.call_user_generator(
request_metadata,
(10,),
{"raise_exception": True},
):
result_list.append(result)
assert result_list == [0]
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.asyncio
async def test_callable_with_async_init(run_user_code_in_separate_thread: bool):
class AsyncInitializer:
async def __init__(self, msg: str):
await asyncio.sleep(0.001)
self._msg = msg
def __call__(self) -> str:
return self._msg
msg = "hello world"
user_callable_wrapper = _make_user_callable_wrapper(
AsyncInitializer,
init_args=(msg,),
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
request_metadata = _make_request_metadata()
assert (
await user_callable_wrapper.call_user_method(request_metadata, tuple(), dict())
) == msg
@pytest.mark.parametrize("run_user_code_in_separate_thread", [False, True])
@pytest.mark.parametrize("async_del", [False, True])
@pytest.mark.asyncio
async def test_destructor_only_called_once(
async_del: bool, run_user_code_in_separate_thread: bool
):
num_destructor_calls = 0
if async_del:
class DestroyerOfNothing:
async def __del__(self) -> str:
nonlocal num_destructor_calls
num_destructor_calls += 1
else:
class DestroyerOfNothing:
def __del__(self) -> str:
nonlocal num_destructor_calls
num_destructor_calls += 1
user_callable_wrapper = _make_user_callable_wrapper(
DestroyerOfNothing,
run_user_code_in_separate_thread=run_user_code_in_separate_thread,
)
await user_callable_wrapper.initialize_callable()
# Call `call_destructor` many times in parallel; only the first one should actually
# run the `__del__` method.
await asyncio.gather(*[user_callable_wrapper.call_destructor() for _ in range(100)])
assert num_destructor_calls == 1
| BasicClass |
python | PyCQA__pylint | pylint/pyreverse/diadefslib.py | {
"start": 794,
"end": 7345
} | class ____:
"""Handle diagram generation options."""
def __init__(self, linker: Linker, handler: DiadefsHandler) -> None:
"""Common Diagram Handler initialization."""
self.config = handler.config
self.args = handler.args
self.module_names: bool = False
self._set_default_options()
self.linker = linker
self.classdiagram: ClassDiagram # defined by subclasses
# Only pre-calculate depths if user has requested a max_depth
if handler.config.max_depth is not None:
# Detect which of the args are leaf nodes
leaf_nodes = self.get_leaf_nodes()
# Emit a warning if any of the args are not leaf nodes
diff = set(self.args).difference(set(leaf_nodes))
if len(diff) > 0:
warnings.warn(
"Detected nested names within the specified packages. "
f"The following packages: {sorted(diff)} will be ignored for "
f"depth calculations, using only: {sorted(leaf_nodes)} as the base for limiting "
"package depth.",
stacklevel=2,
)
self.args_depths = {module: module.count(".") for module in leaf_nodes}
def get_title(self, node: nodes.ClassDef) -> str:
"""Get title for objects."""
title = node.name
if self.module_names:
title = f"{node.root().name}.{title}"
return title # type: ignore[no-any-return]
def get_leaf_nodes(self) -> list[str]:
"""
Get the leaf nodes from the list of args in the generator.
A leaf node is one that is not a prefix (with an extra dot) of any other node.
"""
leaf_nodes = [
module
for module in self.args
if not any(
other != module and other.startswith(module + ".")
for other in self.args
)
]
return leaf_nodes
def _set_option(self, option: bool | None) -> bool:
"""Activate some options if not explicitly deactivated."""
# if we have a class diagram, we want more information by default;
# so if the option is None, we return True
if option is None:
return bool(self.config.classes)
return option
def _set_default_options(self) -> None:
"""Set different default options with _default dictionary."""
self.module_names = self._set_option(self.config.module_names)
all_ancestors = self._set_option(self.config.all_ancestors)
all_associated = self._set_option(self.config.all_associated)
anc_level, association_level = (0, 0)
if all_ancestors:
anc_level = -1
if all_associated:
association_level = -1
if self.config.show_ancestors is not None:
anc_level = self.config.show_ancestors
if self.config.show_associated is not None:
association_level = self.config.show_associated
self.anc_level, self.association_level = anc_level, association_level
def _get_levels(self) -> tuple[int, int]:
"""Help function for search levels."""
return self.anc_level, self.association_level
def _should_include_by_depth(self, node: nodes.NodeNG) -> bool:
"""Check if a node should be included based on depth.
A node will be included if it is at or below the max_depth relative to the
specified base packages. A node is considered to be a base package if it is the
deepest package in the list of specified packages. In other words the base nodes
are the leaf nodes of the specified package tree.
"""
# If max_depth is not set, include all nodes
if self.config.max_depth is None:
return True
# Calculate the absolute depth of the node
name = node.root().name
absolute_depth = name.count(".")
# Retrieve the base depth to compare against
relative_depth = next(
(v for k, v in self.args_depths.items() if name.startswith(k)), None
)
return relative_depth is not None and bool(
(absolute_depth - relative_depth) <= self.config.max_depth
)
def show_node(self, node: nodes.ClassDef) -> bool:
"""Determine if node should be shown based on config."""
if node.root().name == "builtins":
return self.config.show_builtin # type: ignore[no-any-return]
if is_stdlib_module(node.root().name):
return self.config.show_stdlib # type: ignore[no-any-return]
# Filter node by depth
return self._should_include_by_depth(node)
def add_class(self, node: nodes.ClassDef) -> None:
"""Visit one class and add it to diagram."""
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node)
def get_ancestors(
self, node: nodes.ClassDef, level: int
) -> Generator[nodes.ClassDef]:
"""Return ancestor nodes of a class node."""
if level == 0:
return
for ancestor in node.ancestors(recurs=False):
if not self.show_node(ancestor):
continue
yield ancestor
def get_associated(
self, klass_node: nodes.ClassDef, level: int
) -> Generator[nodes.ClassDef]:
"""Return associated nodes of a class node."""
if level == 0:
return
for association_nodes in list(klass_node.instance_attrs_type.values()) + list(
klass_node.locals_type.values()
):
for node in association_nodes:
if isinstance(node, astroid.Instance):
node = node._proxied
if not (isinstance(node, nodes.ClassDef) and self.show_node(node)):
continue
yield node
def extract_classes(
self, klass_node: nodes.ClassDef, anc_level: int, association_level: int
) -> None:
"""Extract recursively classes related to klass_node."""
if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
return
self.add_class(klass_node)
for ancestor in self.get_ancestors(klass_node, anc_level):
self.extract_classes(ancestor, anc_level - 1, association_level)
for node in self.get_associated(klass_node, association_level):
self.extract_classes(node, anc_level, association_level - 1)
| DiaDefGenerator |
python | wandb__wandb | wandb/vendor/pygments/lexers/html.py | {
"start": 15766,
"end": 19269
} | class ____(ExtendedRegexLexer):
"""
For Pug markup.
Pug is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
.. versionadded:: 1.4
"""
name = 'Pug'
aliases = ['pug', 'jade']
filenames = ['*.pug', '*.jade']
mimetypes = ['text/x-pug', 'text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
default('plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[\w:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
JadeLexer = PugLexer # compat
| PugLexer |
python | kamyu104__LeetCode-Solutions | Python/equal-sum-arrays-with-minimum-number-of-operations.py | {
"start": 54,
"end": 843
} | class ____(object):
def minOperations(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
if len(nums1)*6 < len(nums2) or len(nums1) > len(nums2)*6:
return -1
diff = sum(nums2)-sum(nums1)
if diff < 0:
nums1, nums2 = nums2, nums1
diff = -diff
count = collections.Counter(6-num for num in nums1)
count += collections.Counter(num-1 for num in nums2)
result = 0
for i in reversed(xrange(1, 6)):
if not count[i]:
continue
cnt = min(count[i], (diff+i-1)//i)
result += cnt
diff -= i*cnt
if diff <= 0:
break
return result
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_servicehooks.py | {
"start": 752,
"end": 1797
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.login_as(user=self.user)
self.path = f"/api/0/projects/{self.project.organization.slug}/{self.project.slug}/hooks/"
def test_simple(self) -> None:
with self.feature("projects:servicehooks"):
resp = self.client.post(
self.path,
data={"url": "http://example.com", "events": ["event.alert", "event.created"]},
)
assert resp.status_code == 201, resp.content
hook = ServiceHook.objects.get(guid=resp.data["id"])
assert hook.url == "http://example.com"
assert hook.project_id == self.project.id
assert hook.actor_id == self.user.id
assert sorted(hook.events) == ["event.alert", "event.created"]
assert hook.version == 0
hook_project = ServiceHookProject.objects.get(project_id=self.project.id)
assert hook_project.service_hook_id == hook.id
| CreateProjectServiceHookTest |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py | {
"start": 536,
"end": 681
} | class ____(type):
def __getnewargs_ex__(cls):
return ((1,), {"2": "2"})
@six.add_metaclass(GetNewArgsExMetaclass)
| GetNewArgsExMetaclass |
python | coleifer__peewee | tests/fields.py | {
"start": 41529,
"end": 41603
} | class ____(TestModel):
content = TextField()
tags = ListField()
| Todo |
python | sympy__sympy | sympy/physics/quantum/circuitplot.py | {
"start": 10742,
"end": 10984
} | class ____(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex='M_z'
| Mz |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 5184,
"end": 5369
} | class ____(TicketingActionValidatorHandler):
provider = Action.Type.GITHUB_ENTERPRISE
@action_validator_registry.register(Action.Type.PAGERDUTY)
| GithubEnterpriseActionValidatorHandler |
python | tensorflow__tensorflow | tensorflow/python/ops/script_ops.py | {
"start": 2900,
"end": 6706
} | class ____:
"""A wrapper for a function owned by an EagerPyFunc."""
def __init__(self, func, Tout, is_grad_func):
"""Constructs an EagerFunc.
Args:
func: The function to wrap.
Tout: A list of datatypes for the output; an empty list if the output is
None.
is_grad_func: Whether this EagerFunc is the gradient of another
EagerPyFunc.
"""
self._func = func
self._out_dtypes = Tout
self._is_grad_func = is_grad_func
self._support_graph_mode_gradient = False
def set_support_graph_mode_gradient(self):
"""Indicates the object shall support gradient ops.
This function is internally used by _EagerPyFuncGrad to support
graph mode gradient of EagerFunc via tf.gradient().
"""
self._support_graph_mode_gradient = True
def _convert(self, value, dtype):
"""Converts `value` to a tensor of type `dtype`, with error checking.
Args:
value: The tensor to convert.
dtype: The desired dtype.
Returns:
A tensor of type `dtype`, or a zeros tensor if value is None and
this function is in fact a gradient function.
Raises:
RuntimeError: if `value` is a variable.
"""
if isinstance(value, resource_variable_ops.ResourceVariable):
raise RuntimeError(
"Attempting to return a variable from an eagerly executed py_func. "
"Only numeric data structures like Tensors or NumPy arrays should "
"be returned; to return the value of a variable, make sure to obtain "
"the Tensor backing it by calling `.read_value()` on the variable in "
f"question: {value}")
if value is None and self._is_grad_func:
# Gradient functions may legitimately return a list that contains
# both Tensors and Python Nones. Unfortunately this breaks the
# OpKernel, so for now we replace None objects with zeros, which is
# mathematically correct but will prevent short-circuiting gradient
# computations.
#
# TODO(akshayka): Make it possible to return a list of both Tensors and
# Nones from an EagerPyFunc.
return constant_op.constant(0.0, dtype=dtype)
return ops.convert_to_tensor(value, dtype=dtype)
def __call__(self, device, token, args):
"""Calls `self._func` in eager mode, recording the tape if needed."""
use_tape_cache = (
self._support_graph_mode_gradient or record.could_possibly_record())
if use_tape_cache:
with backprop.GradientTape() as tape:
for tensor in args:
for t in nest.flatten(tensor):
if backprop_util.IsTrainable(t):
tape.watch(t)
outputs = self._call(device, args)
tape_cache[compat.as_bytes(token)] = (tape, args, outputs)
else:
outputs = self._call(device, args)
return outputs
def _call(self, device, args):
"""Passes `args` to `self._func`, which is executed eagerly."""
with context.eager_mode():
ret = self._func(*args)
# copy the returned tensors to the PyFunc op's device if necessary.
device_name = device
if device_name is None:
# "None" here means "CPU", from the nullptr convention with C++ device
# pointers.
device_name = "/job:localhost/replica:0/task:0/device:CPU:0"
with ops.device(device):
if isinstance(ret, (tuple, list)):
outputs = [
_maybe_copy_to_context_device(self._convert(x, dtype=dtype),
device_name)
for (x, dtype) in zip(ret, self._out_dtypes)
]
elif ret is None:
outputs = None
else:
outputs = _maybe_copy_to_context_device(
self._convert(ret, dtype=self._out_dtypes[0]), device_name)
return outputs
| EagerFunc |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/rerank.py | {
"start": 221,
"end": 840
} | class ____(BaseEvent):
"""
ReRankStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
nodes (List[NodeWithScore]): List of nodes with scores.
top_n (int): Number of nodes to return after rerank.
model_name (str): Name of the model used for reranking.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
query: Optional[QueryType]
nodes: List[NodeWithScore]
top_n: int
model_name: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ReRankStartEvent"
| ReRankStartEvent |
python | openai__openai-python | tests/api_resources/fine_tuning/checkpoints/test_permissions.py | {
"start": 531,
"end": 7302
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
)
assert_matches_type(SyncPage[PermissionCreateResponse], permission, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.fine_tuning.checkpoints.permissions.with_raw_response.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(SyncPage[PermissionCreateResponse], permission, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.fine_tuning.checkpoints.permissions.with_streaming_response.create(
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
project_ids=["string"],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(SyncPage[PermissionCreateResponse], permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
client.fine_tuning.checkpoints.permissions.with_raw_response.create(
fine_tuned_model_checkpoint="",
project_ids=["string"],
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="after",
limit=0,
order="ascending",
project_id="project_id",
)
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
fine_tuned_model_checkpoint="",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.delete(
permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
fine_tuned_model_checkpoint="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
permission_id="",
fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
)
| TestPermissions |
python | ansible__ansible | lib/ansible/module_utils/_internal/_json/_profiles/_tagless.py | {
"start": 396,
"end": 2053
} | class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
@classmethod
def post_init(cls) -> None:
cls.serialize_map = {
# DTFIX5: support serialization of every type that is supported in the Ansible variable type system
set: cls.serialize_as_list,
tuple: cls.serialize_as_list,
_datetime.date: cls.serialize_as_isoformat,
_datetime.time: cls.serialize_as_isoformat,
_datetime.datetime: cls.serialize_as_isoformat,
# bytes intentionally omitted as they are not a supported variable type, they were not originally supported by the old AnsibleJSONEncoder
_datatag._AnsibleTaggedDate: cls.discard_tags,
_datatag._AnsibleTaggedTime: cls.discard_tags,
_datatag._AnsibleTaggedDateTime: cls.discard_tags,
_datatag._AnsibleTaggedStr: cls.discard_tags,
_datatag._AnsibleTaggedInt: cls.discard_tags,
_datatag._AnsibleTaggedFloat: cls.discard_tags,
_datatag._AnsibleTaggedSet: cls.discard_tags,
_datatag._AnsibleTaggedList: cls.discard_tags,
_datatag._AnsibleTaggedTuple: cls.discard_tags,
_datatag._AnsibleTaggedDict: cls.discard_tags,
}
cls.deserialize_map = {
'__ansible_unsafe': _functools.partial(cls.unsupported_target_type_error, '__ansible_unsafe'),
'__ansible_vault': _functools.partial(cls.unsupported_target_type_error, '__ansible_vault'),
}
cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
| _Profile |
python | gevent__gevent | src/gevent/libev/watcher.py | {
"start": 7790,
"end": 8196
} | class ____(_base.StatMixin, watcher):
_watcher_type = 'stat'
@property
def attr(self):
if not self._watcher.attr.st_nlink:
return
return self._watcher.attr
@property
def prev(self):
if not self._watcher.prev.st_nlink:
return
return self._watcher.prev
@property
def interval(self):
return self._watcher.interval
| stat |
python | joke2k__faker | faker/providers/sbn/sbn.py | {
"start": 371,
"end": 1520
} | class ____(SBN):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.check_digit = self._check_digit()
def _check_digit(self) -> str:
"""Calculate the check digit for SBN-9.
SBNs use the same check digit calculation as ISBN. See
https://en.wikipedia.org/wiki/International_Standard_Book_Number
for calculation. Only modification is weights range from 1 to 9
instead of 1 to 10.
"""
weights = range(1, 9)
body = "".join([part for part in [self.registrant, self.publication] if part is not None])
remainder = sum(int(b) * w for b, w in zip(body, weights)) % 11
check_digit = "X" if remainder == 10 else str(remainder)
return str(check_digit)
def format(self, separator: str = "") -> str:
return separator.join(
[
part
for part in [
self.registrant,
self.publication,
self.check_digit,
]
if part is not None
]
)
| SBN9 |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_issues_count.py | {
"start": 1214,
"end": 4246
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=10, window=1),
RateLimitCategory.USER: RateLimit(limit=10, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=10, window=1),
}
}
)
def _count(
self, request: Request, query, organization, projects, environments, extra_query_kwargs=None
):
with start_span(op="_count"):
query_kwargs = {
"projects": projects,
"referrer": Referrer.API_ORGANIZATION_ISSUES_COUNT,
}
query = query.strip()
if query:
search_filters = convert_query_values(
parse_search_query(query), projects, request.user, environments
)
validate_search_filter_permissions(organization, search_filters, request.user)
query_kwargs["search_filters"] = search_filters
if extra_query_kwargs is not None:
assert "environment" not in extra_query_kwargs
query_kwargs.update(extra_query_kwargs)
query_kwargs["environments"] = environments if environments else None
query_kwargs["max_hits"] = ISSUES_COUNT_MAX_HITS_LIMIT
query_kwargs["actor"] = request.user
with start_span(op="start_search") as span:
span.set_data("query_kwargs", query_kwargs)
result = search.backend.query(**query_kwargs)
return result.hits
def get(self, request: Request, organization: Organization | RpcOrganization) -> Response:
stats_period = request.GET.get("groupStatsPeriod")
try:
start, end = get_date_range_from_params(request.GET)
except InvalidParams as e:
raise ParseError(detail=str(e))
if stats_period not in (None, "", "24h", "14d", "auto"):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
environments = self.get_environments(request, organization)
projects = self.get_projects(request, organization)
if not projects:
return Response([])
queries = request.GET.getlist("query")
response = {}
for query in queries:
try:
count = self._count(
request,
query,
organization,
projects,
environments,
{"count_hits": True, "date_to": end, "date_from": start},
)
response[query] = count
except (ValidationError, discover.InvalidSearchQuery) as exc:
return Response({"detail": str(exc)}, status=400)
return Response(response)
| OrganizationIssuesCountEndpoint |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-timeplus/destination_timeplus/destination.py | {
"start": 456,
"end": 7515
} | class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
endpoint = config["endpoint"]
apikey = config["apikey"]
if endpoint[-1] == "/":
endpoint = endpoint[0 : len(endpoint) - 1]
env = Environment().address(endpoint).apikey(apikey)
stream_list = Stream(env=env).list()
all_streams = {s.name for s in stream_list}
# only support "overwrite", "append"
for configured_stream in configured_catalog.streams:
is_overwrite = configured_stream.destination_sync_mode == DestinationSyncMode.overwrite
stream_exists = configured_stream.stream.name in all_streams
logger.info(f"Stream {configured_stream.stream.name} {configured_stream.destination_sync_mode}")
need_delete_stream = False
need_create_stream = False
if is_overwrite:
if stream_exists:
# delete all data in the existing stream and recreate the stream.
need_delete_stream = True
need_create_stream = True
else:
# only need to create the stream
need_create_stream = True
else:
if stream_exists:
# for append mode, just add more data to the existing stream. No need to do anything.
pass
else:
# for append mode, create the stream and append data to it.
need_create_stream = True
if need_delete_stream:
# delete the existing stream
Stream(env=env).name(configured_stream.stream.name).get().delete()
logger.info(f"Stream {configured_stream.stream.name} deleted successfully")
if need_create_stream:
# create a new stream
DestinationTimeplus.create_stream(env, configured_stream.stream)
logger.info(f"Stream {configured_stream.stream.name} created successfully")
for message in input_messages:
if message.type == Type.STATE:
# Emitting a state message indicates that all records which came before it have been written to the destination. So we flush
# the queue to ensure writes happen, then output the state message to indicate it's safe to checkpoint state
yield message
elif message.type == Type.RECORD:
record = message.record
# this code is to send data to a single-column stream
# Stream(env=env).name(record.stream).column("raw", "string").ingest(payload=record.data)
Stream(env=env).name(record.stream).ingest(payload=record.data, format="streaming")
else:
# ignore other message types for now
continue
@staticmethod
def create_stream(env, stream: AirbyteStream):
# singlel-column stream
# Stream(env=env).name(stream.name).column('raw','string').create()
tp_stream = Stream(env=env).name(stream.name.strip())
for name, v in stream.json_schema["properties"].items():
tp_stream.column(name.strip(), DestinationTimeplus.type_mapping(v))
tp_stream.create()
@staticmethod
def type_mapping(v) -> str:
airbyte_type = v["type"]
if type(airbyte_type) is list:
for t in list(airbyte_type):
if t != "null":
type_def = {"type": t}
if t == "array":
if "items" in v:
type_def["items"] = v["items"]
else:
type_def["type"] = "string"
return DestinationTimeplus.type_mapping(type_def)
if airbyte_type == "number":
return "float"
elif airbyte_type == "integer":
return "integer"
elif airbyte_type == "boolean":
return "bool"
elif airbyte_type == "object":
return "string"
elif airbyte_type == "array":
return f"array({DestinationTimeplus.type_mapping(v['items'])})"
else:
return "string"
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
endpoint = config["endpoint"]
apikey = config["apikey"]
if len(apikey) != 60:
return AirbyteConnectionStatus(status=Status.FAILED, message="API Key must be 60 characters")
if endpoint[-1] == "/":
endpoint = endpoint[0 : len(endpoint) - 1]
env = Environment().address(endpoint).apikey(apikey)
Stream(env=env).list()
logger.info("Successfully connected to " + endpoint)
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(
status=Status.FAILED, message=f"Fail to connect to Timeplus endpoint with the given API key: {repr(e)}"
)
| DestinationTimeplus |
python | google__flatbuffers | python/flatbuffers/number_types.py | {
"start": 2299,
"end": 2447
} | class ____(object):
bytewidth = 4
min_val = None
max_val = None
py_type = float
name = "float32"
packer_type = packer.float32
| Float32Flags |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 86273,
"end": 87172
} | class ____(Request):
"""
Peek the next task from a given queue
:param queue: ID of the queue
:type queue: str
"""
_service = "queues"
_action = "peek_task"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"queue": {"description": "ID of the queue", "type": "string"}},
"required": ["queue"],
"type": "object",
}
def __init__(self, queue: str, **kwargs: Any) -> None:
super(PeekTaskRequest, self).__init__(**kwargs)
self.queue = queue
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
| PeekTaskRequest |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 53444,
"end": 137987
} | class ____:
"""Class to set up and run a Spack concretization solve."""
gen: "ProblemInstanceBuilder"
possible_versions: Dict[str, Dict[GitOrStandardVersion, List[Provenance]]]
def __init__(self, tests: spack.concretize.TestsType = False):
self.possible_graph = create_graph_analyzer()
# these are all initialized in setup()
self.requirement_parser = RequirementParser(spack.config.CONFIG)
self.possible_virtuals: Set[str] = set()
self.assumptions: List[Tuple["clingo.Symbol", bool]] = [] # type: ignore[name-defined]
# pkg_name -> version -> list of possible origins (package.py, installed, etc.)
self.possible_versions = collections.defaultdict(lambda: collections.defaultdict(list))
self.versions_from_yaml: Dict[str, List[GitOrStandardVersion]] = {}
self.git_commit_versions: Dict[str, Dict[GitOrStandardVersion, str]] = (
collections.defaultdict(dict)
)
self.deprecated_versions: Dict[str, Set[GitOrStandardVersion]] = collections.defaultdict(
set
)
self.possible_compilers: List[spack.spec.Spec] = []
self.rejected_compilers: Set[spack.spec.Spec] = set()
self.possible_oses: Set = set()
self.variant_values_from_specs: Set = set()
self.version_constraints: Set = set()
self.target_constraints: Set = set()
self.default_targets: List = []
self.compiler_version_constraints: Set = set()
self.post_facts: List = []
self.variant_ids_by_def_id: Dict[int, int] = {}
self.reusable_and_possible: ConcreteSpecsByHash = ConcreteSpecsByHash()
self._id_counter: Iterator[int] = itertools.count()
self._trigger_cache: ConditionSpecCache = collections.defaultdict(dict)
self._effect_cache: ConditionSpecCache = collections.defaultdict(dict)
# Caches to optimize the setup phase of the solver
self.target_specs_cache = None
# whether to add installed/binary hashes to the solve
self.tests = tests
# If False allows for input specs that are not solved
self.concretize_everything = True
# Set during the call to setup
self.pkgs: Set[str] = set()
self.explicitly_required_namespaces: Dict[str, str] = {}
# list of unique libc specs targeted by compilers (or an educated guess if no compiler)
self.libcs: List[spack.spec.Spec] = []
# If true, we have to load the code for synthesizing splices
self.enable_splicing: bool = spack.config.CONFIG.get("concretizer:splice:automatic")
def pkg_version_rules(self, pkg: Type[spack.package_base.PackageBase]) -> None:
"""Declares known versions, their origins, and their weights."""
version_provenance = self.possible_versions[pkg.name]
ordered_versions = spack.package_base.sort_by_pkg_preference(
self.possible_versions[pkg.name], pkg=pkg
)
# Account for preferences in packages.yaml, if any
if pkg.name in self.versions_from_yaml:
ordered_versions = spack.llnl.util.lang.dedupe(
self.versions_from_yaml[pkg.name] + ordered_versions
)
for weight, declared_version in enumerate(ordered_versions):
self.gen.fact(fn.pkg_fact(pkg.name, fn.version_declared(declared_version, weight)))
for origin in version_provenance[declared_version]:
self.gen.fact(
fn.pkg_fact(pkg.name, fn.version_origin(declared_version, str(origin)))
)
for v in self.possible_versions[pkg.name]:
if pkg.needs_commit(v):
commit = pkg.version_or_package_attr("commit", v, "")
self.git_commit_versions[pkg.name][v] = commit
# Declare deprecated versions for this package, if any
deprecated = self.deprecated_versions[pkg.name]
for v in sorted(deprecated):
self.gen.fact(fn.pkg_fact(pkg.name, fn.deprecated_version(v)))
def spec_versions(self, spec):
"""Return list of clauses expressing spec's version constraints."""
spec = specify(spec)
msg = "Internal Error: spec with no name occured. Please report to the spack maintainers."
assert spec.name, msg
if spec.concrete:
return [fn.attr("version", spec.name, spec.version)]
if spec.versions == vn.any_version:
return []
# record all version constraints for later
self.version_constraints.add((spec.name, spec.versions))
return [fn.attr("node_version_satisfies", spec.name, spec.versions)]
def target_ranges(self, spec, single_target_fn):
target = spec.architecture.target
# Check if the target is a concrete target
if str(target) in spack.vendor.archspec.cpu.TARGETS:
return [single_target_fn(spec.name, target)]
self.target_constraints.add(target)
return [fn.attr("node_target_satisfies", spec.name, target)]
def conflict_rules(self, pkg):
for when_spec, conflict_specs in pkg.conflicts.items():
when_spec_msg = f"conflict constraint {when_spec}"
when_spec_id = self.condition(when_spec, required_name=pkg.name, msg=when_spec_msg)
when_spec_str = str(when_spec)
for conflict_spec, conflict_msg in conflict_specs:
conflict_spec_str = str(conflict_spec)
if conflict_msg is None:
conflict_msg = f"{pkg.name}: "
if not when_spec_str:
conflict_msg += f"conflicts with '{conflict_spec_str}'"
else:
conflict_msg += f"'{conflict_spec_str}' conflicts with '{when_spec_str}'"
if not conflict_spec_str:
conflict_spec_msg = f"conflict is triggered when {pkg.name}"
else:
conflict_spec_msg = f"conflict is triggered when {conflict_spec_str}"
conflict_spec_id = self.condition(
conflict_spec,
required_name=conflict_spec.name or pkg.name,
msg=conflict_spec_msg,
)
self.gen.fact(
fn.pkg_fact(
pkg.name, fn.conflict(conflict_spec_id, when_spec_id, conflict_msg)
)
)
self.gen.newline()
def config_compatible_os(self):
"""Facts about compatible os's specified in configs"""
self.gen.h2("Compatible OS from concretizer config file")
os_data = spack.config.get("concretizer:os_compatible", {})
for recent, reusable in os_data.items():
for old in reusable:
self.gen.fact(fn.os_compatible(recent, old))
self.gen.newline()
def package_requirement_rules(self, pkg):
self.emit_facts_from_requirement_rules(self.requirement_parser.rules(pkg))
def pkg_rules(self, pkg, tests):
pkg = self.pkg_class(pkg)
# Namespace of the package
self.gen.fact(fn.pkg_fact(pkg.name, fn.namespace(pkg.namespace)))
# versions
self.pkg_version_rules(pkg)
self.gen.newline()
# variants
self.variant_rules(pkg)
# conflicts
self.conflict_rules(pkg)
# virtuals
self.package_provider_rules(pkg)
# dependencies
self.package_dependencies_rules(pkg)
# splices
if self.enable_splicing:
self.package_splice_rules(pkg)
self.package_requirement_rules(pkg)
# trigger and effect tables
self.trigger_rules()
self.effect_rules()
def trigger_rules(self):
"""Flushes all the trigger rules collected so far, and clears the cache."""
if not self._trigger_cache:
return
self.gen.h2("Trigger conditions")
for name in self._trigger_cache:
cache = self._trigger_cache[name]
for (spec_str, _), (trigger_id, requirements) in cache.items():
self.gen.fact(fn.pkg_fact(name, fn.trigger_id(trigger_id)))
self.gen.fact(fn.pkg_fact(name, fn.trigger_msg(spec_str)))
for predicate in requirements:
self.gen.fact(fn.condition_requirement(trigger_id, *predicate.args))
self.gen.newline()
self._trigger_cache.clear()
def effect_rules(self):
"""Flushes all the effect rules collected so far, and clears the cache."""
if not self._effect_cache:
return
self.gen.h2("Imposed requirements")
for name in sorted(self._effect_cache):
cache = self._effect_cache[name]
for (spec_str, _), (effect_id, requirements) in cache.items():
self.gen.fact(fn.pkg_fact(name, fn.effect_id(effect_id)))
self.gen.fact(fn.pkg_fact(name, fn.effect_msg(spec_str)))
for predicate in requirements:
self.gen.fact(fn.imposed_constraint(effect_id, *predicate.args))
self.gen.newline()
self._effect_cache.clear()
def define_variant(
self,
pkg: Type[spack.package_base.PackageBase],
name: str,
when: spack.spec.Spec,
variant_def: vt.Variant,
):
pkg_fact = lambda f: self.gen.fact(fn.pkg_fact(pkg.name, f))
# Every variant id has a unique definition (conditional or unconditional), and
# higher variant id definitions take precedence when variants intersect.
vid = next(self._id_counter)
# used to find a variant id from its variant definition (for variant values on specs)
self.variant_ids_by_def_id[id(variant_def)] = vid
if when == EMPTY_SPEC:
# unconditional variant
pkg_fact(fn.variant_definition(name, vid))
else:
# conditional variant
msg = f"Package {pkg.name} has variant '{name}' when {when}"
cond_id = self.condition(when, required_name=pkg.name, msg=msg)
pkg_fact(fn.variant_condition(name, vid, cond_id))
# record type so we can construct the variant when we read it back in
self.gen.fact(fn.variant_type(vid, variant_def.variant_type.string))
if variant_def.sticky:
pkg_fact(fn.variant_sticky(vid))
# define defaults for this variant definition
if variant_def.multi:
for val in sorted(variant_def.make_default().values):
pkg_fact(fn.variant_default_value_from_package_py(vid, val))
else:
pkg_fact(fn.variant_default_value_from_package_py(vid, variant_def.default))
# define possible values for this variant definition
values = variant_def.values
if values is None:
values = []
elif isinstance(values, vt.DisjointSetsOfValues):
union = set()
for sid, s in enumerate(sorted(values.sets)):
for value in sorted(s):
pkg_fact(fn.variant_value_from_disjoint_sets(vid, value, sid))
union.update(s)
values = union
# ensure that every variant has at least one possible value.
if not values:
values = [variant_def.default]
for value in sorted(values):
pkg_fact(fn.variant_possible_value(vid, value))
# we're done here for unconditional values
if not isinstance(value, vt.ConditionalValue):
continue
# make a spec indicating whether the variant has this conditional value
variant_has_value = spack.spec.Spec()
variant_has_value.variants[name] = vt.VariantValue(
vt.VariantType.MULTI, name, (value.value,)
)
if value.when:
# the conditional value is always "possible", but it imposes its when condition as
# a constraint if the conditional value is taken. This may seem backwards, but it
# ensures that the conditional can only occur when its condition holds.
self.condition(
required_spec=variant_has_value,
imposed_spec=value.when,
required_name=pkg.name,
imposed_name=pkg.name,
msg=f"{pkg.name} variant {name} has value '{value.value}' when {value.when}",
)
else:
vstring = f"{name}='{value.value}'"
# We know the value is never allowed statically (when was None), but we can't just
# ignore it b/c it could come in as a possible value and we need a good error msg.
# So, it's a conflict -- if the value is somehow used, it'll trigger an error.
trigger_id = self.condition(
variant_has_value,
required_name=pkg.name,
msg=f"invalid variant value: {vstring}",
)
constraint_id = self.condition(
EMPTY_SPEC, required_name=pkg.name, msg="empty (total) conflict constraint"
)
msg = f"variant value {vstring} is conditionally disabled"
pkg_fact(fn.conflict(trigger_id, constraint_id, msg))
self.gen.newline()
def define_auto_variant(self, name: str, multi: bool):
self.gen.h3(f"Special variant: {name}")
vid = next(self._id_counter)
self.gen.fact(fn.auto_variant(name, vid))
self.gen.fact(
fn.variant_type(
vid, vt.VariantType.MULTI.value if multi else vt.VariantType.SINGLE.value
)
)
def variant_rules(self, pkg: Type[spack.package_base.PackageBase]):
for name in pkg.variant_names():
self.gen.h3(f"Variant {name} in package {pkg.name}")
for when, variant_def in pkg.variant_definitions(name):
self.define_variant(pkg, name, when, variant_def)
def _get_condition_id(
self,
named_cond: spack.spec.Spec,
cache: ConditionSpecCache,
body: bool,
context: ConditionIdContext,
) -> int:
"""Get the id for one half of a condition (either a trigger or an imposed constraint).
Construct a key from the condition spec and any associated transformation, and
cache the ASP functions that they imply. The saved functions will be output
later in ``trigger_rules()`` and ``effect_rules()``.
Returns:
The id of the cached trigger or effect.
"""
pkg_cache = cache[named_cond.name]
named_cond_key = (str(named_cond), context.transform)
result = pkg_cache.get(named_cond_key)
if result:
return result[0]
cond_id = next(self._id_counter)
requirements = self.spec_clauses(named_cond, body=body, context=context)
if context.transform:
requirements = context.transform(named_cond, requirements)
pkg_cache[named_cond_key] = (cond_id, requirements)
return cond_id
def _condition_clauses(
self,
required_spec: spack.spec.Spec,
imposed_spec: Optional[spack.spec.Spec] = None,
*,
required_name: Optional[str] = None,
imposed_name: Optional[str] = None,
msg: Optional[str] = None,
context: Optional[ConditionContext] = None,
) -> Tuple[List[AspFunction], int]:
clauses = []
required_name = required_spec.name or required_name
if not required_name:
raise ValueError(f"Must provide a name for anonymous condition: '{required_spec}'")
if not context:
context = ConditionContext()
context.transform_imposed = remove_facts("node", "virtual_node")
if imposed_spec:
imposed_name = imposed_spec.name or imposed_name
if not imposed_name:
raise ValueError(f"Must provide a name for imposed constraint: '{imposed_spec}'")
with named_spec(required_spec, required_name), named_spec(imposed_spec, imposed_name):
# Check if we can emit the requirements before updating the condition ID counter.
# In this way, if a condition can't be emitted but the exception is handled in the
# caller, we won't emit partial facts.
condition_id = next(self._id_counter)
requirement_context = context.requirement_context()
trigger_id = self._get_condition_id(
required_spec, cache=self._trigger_cache, body=True, context=requirement_context
)
clauses.append(fn.pkg_fact(required_spec.name, fn.condition(condition_id)))
clauses.append(fn.condition_reason(condition_id, msg))
clauses.append(
fn.pkg_fact(required_spec.name, fn.condition_trigger(condition_id, trigger_id))
)
if not imposed_spec:
return clauses, condition_id
impose_context = context.impose_context()
effect_id = self._get_condition_id(
imposed_spec, cache=self._effect_cache, body=False, context=impose_context
)
clauses.append(
fn.pkg_fact(required_spec.name, fn.condition_effect(condition_id, effect_id))
)
return clauses, condition_id
def condition(
self,
required_spec: spack.spec.Spec,
imposed_spec: Optional[spack.spec.Spec] = None,
*,
required_name: Optional[str] = None,
imposed_name: Optional[str] = None,
msg: Optional[str] = None,
context: Optional[ConditionContext] = None,
) -> int:
"""Generate facts for a dependency or virtual provider condition.
Arguments:
required_spec: the constraints that triggers this condition
imposed_spec: the constraints that are imposed when this condition is triggered
required_name: name for ``required_spec``
(required if required_spec is anonymous, ignored if not)
imposed_name: name for ``imposed_spec``
(required if imposed_spec is anonymous, ignored if not)
msg: description of the condition
context: if provided, indicates how to modify the clause-sets for the required/imposed
specs based on the type of constraint they are generated for (e.g. ``depends_on``)
Returns:
int: id of the condition created by this function
"""
clauses, condition_id = self._condition_clauses(
required_spec=required_spec,
imposed_spec=imposed_spec,
required_name=required_name,
imposed_name=imposed_name,
msg=msg,
context=context,
)
for clause in clauses:
self.gen.fact(clause)
return condition_id
def impose(self, condition_id, imposed_spec, node=True, body=False):
imposed_constraints = self.spec_clauses(imposed_spec, body=body)
for pred in imposed_constraints:
# imposed "node"-like conditions are no-ops
if not node and pred.args[0] in ("node", "virtual_node"):
continue
self.gen.fact(fn.imposed_constraint(condition_id, *pred.args))
def package_provider_rules(self, pkg):
for vpkg_name in pkg.provided_virtual_names():
if vpkg_name not in self.possible_virtuals:
continue
self.gen.fact(fn.pkg_fact(pkg.name, fn.possible_provider(vpkg_name)))
for when, provided in pkg.provided.items():
for vpkg in sorted(provided):
if vpkg.name not in self.possible_virtuals:
continue
msg = f"{pkg.name} provides {vpkg} when {when}"
condition_id = self.condition(when, vpkg, required_name=pkg.name, msg=msg)
self.gen.fact(
fn.pkg_fact(when.name, fn.provider_condition(condition_id, vpkg.name))
)
self.gen.newline()
for when, sets_of_virtuals in pkg.provided_together.items():
condition_id = self.condition(
when, required_name=pkg.name, msg="Virtuals are provided together"
)
for set_id, virtuals_together in enumerate(sorted(sets_of_virtuals)):
for name in sorted(virtuals_together):
self.gen.fact(
fn.pkg_fact(pkg.name, fn.provided_together(condition_id, set_id, name))
)
self.gen.newline()
def package_dependencies_rules(self, pkg):
"""Translate ``depends_on`` directives into ASP logic."""
def track_dependencies(input_spec, requirements):
return requirements + [fn.attr("track_dependencies", input_spec.name)]
def dependency_holds(input_spec, requirements):
result = remove_facts("node", "virtual_node")(input_spec, requirements) + [
fn.attr("dependency_holds", pkg.name, input_spec.name, dt.flag_to_string(t))
for t in dt.ALL_FLAGS
if t & depflag
]
if input_spec.name not in pkg.extendees:
return result
return result + [fn.attr("extends", pkg.name, input_spec.name)]
for cond, deps_by_name in pkg.dependencies.items():
cond_str = str(cond)
cond_str_suffix = f" when {cond_str}" if cond_str else ""
for _, dep in deps_by_name.items():
depflag = dep.depflag
# Skip test dependencies if they're not requested
if not self.tests:
depflag &= ~dt.TEST
# ... or if they are requested only for certain packages
elif not isinstance(self.tests, bool) and pkg.name not in self.tests:
depflag &= ~dt.TEST
# if there are no dependency types to be considered
# anymore, don't generate the dependency
if not depflag:
continue
msg = f"{pkg.name} depends on {dep.spec}{cond_str_suffix}"
context = ConditionContext()
context.source = ConstraintOrigin.append_type_suffix(
pkg.name, ConstraintOrigin.DEPENDS_ON
)
context.transform_required = track_dependencies
context.transform_imposed = dependency_holds
self.condition(cond, dep.spec, required_name=pkg.name, msg=msg, context=context)
self.gen.newline()
def _gen_match_variant_splice_constraints(
self,
pkg,
cond_spec: spack.spec.Spec,
splice_spec: spack.spec.Spec,
hash_asp_var: "AspVar",
splice_node,
match_variants: List[str],
):
# If there are no variants to match, no constraints are needed
variant_constraints = []
for i, variant_name in enumerate(match_variants):
vari_defs = pkg.variant_definitions(variant_name)
# the spliceable config of the package always includes the variant
if vari_defs != [] and any(cond_spec.satisfies(s) for (s, _) in vari_defs):
variant = vari_defs[0][1]
if variant.multi:
continue # cannot automatically match multi-valued variants
value_var = AspVar(f"VariValue{i}")
attr_constraint = fn.attr("variant_value", splice_node, variant_name, value_var)
hash_attr_constraint = fn.hash_attr(
hash_asp_var, "variant_value", splice_spec.name, variant_name, value_var
)
variant_constraints.append(attr_constraint)
variant_constraints.append(hash_attr_constraint)
return variant_constraints
def package_splice_rules(self, pkg):
self.gen.h2("Splice rules")
for i, (cond, (spec_to_splice, match_variants)) in enumerate(
sorted(pkg.splice_specs.items())
):
with named_spec(cond, pkg.name):
self.version_constraints.add((cond.name, cond.versions))
self.version_constraints.add((spec_to_splice.name, spec_to_splice.versions))
hash_var = AspVar("Hash")
splice_node = fn.node(AspVar("NID"), cond.name)
when_spec_attrs = [
fn.attr(c.args[0], splice_node, *(c.args[2:]))
for c in self.spec_clauses(cond, body=True, required_from=None)
if c.args[0] != "node"
]
splice_spec_hash_attrs = [
fn.hash_attr(hash_var, *(c.args))
for c in self.spec_clauses(spec_to_splice, body=True, required_from=None)
if c.args[0] != "node"
]
if match_variants is None:
variant_constraints = []
elif match_variants == "*":
filt_match_variants = set()
for map in pkg.variants.values():
for k in map:
filt_match_variants.add(k)
filt_match_variants = sorted(filt_match_variants)
variant_constraints = self._gen_match_variant_splice_constraints(
pkg, cond, spec_to_splice, hash_var, splice_node, filt_match_variants
)
else:
if any(
v in cond.variants or v in spec_to_splice.variants for v in match_variants
):
raise spack.error.PackageError(
"Overlap between match_variants and explicitly set variants"
)
variant_constraints = self._gen_match_variant_splice_constraints(
pkg, cond, spec_to_splice, hash_var, splice_node, match_variants
)
rule_head = fn.abi_splice_conditions_hold(
i, splice_node, spec_to_splice.name, hash_var
)
rule_body_components = (
[
# splice_set_fact,
fn.attr("node", splice_node),
fn.installed_hash(spec_to_splice.name, hash_var),
]
+ when_spec_attrs
+ splice_spec_hash_attrs
+ variant_constraints
)
rule_body = ",\n ".join(str(r) for r in rule_body_components)
rule = f"{rule_head} :-\n {rule_body}."
self.gen.append(rule)
self.gen.newline()
def virtual_requirements_and_weights(self):
virtual_preferences = spack.config.CONFIG.get("packages:all:providers", {})
self.gen.h1("Virtual requirements and weights")
for virtual_str in sorted(self.possible_virtuals):
self.gen.newline()
self.gen.h2(f"Virtual: {virtual_str}")
self.gen.fact(fn.virtual(virtual_str))
rules = self.requirement_parser.rules_from_virtual(virtual_str)
if not rules and virtual_str not in virtual_preferences:
continue
required, preferred, removed = [], [], set()
for rule in rules:
# We don't deal with conditional requirements
if rule.condition != EMPTY_SPEC:
continue
if rule.origin == RequirementOrigin.PREFER_YAML:
preferred.extend(x.name for x in rule.requirements if x.name)
elif rule.origin == RequirementOrigin.REQUIRE_YAML:
required.extend(x.name for x in rule.requirements if x.name)
elif rule.origin == RequirementOrigin.CONFLICT_YAML:
conflict_spec = rule.requirements[0]
# For conflicts, we take action only if just a name is used
if spack.spec.Spec(conflict_spec.name).satisfies(conflict_spec):
removed.add(conflict_spec.name)
current_preferences = required + preferred + virtual_preferences.get(virtual_str, [])
current_preferences = [x for x in current_preferences if x not in removed]
for i, provider in enumerate(spack.llnl.util.lang.dedupe(current_preferences)):
provider_name = spack.spec.Spec(provider).name
self.gen.fact(fn.provider_weight_from_config(virtual_str, provider_name, i))
self.gen.newline()
if rules:
self.emit_facts_from_requirement_rules(rules)
self.trigger_rules()
self.effect_rules()
def emit_facts_from_requirement_rules(self, rules: List[RequirementRule]):
"""Generate facts to enforce requirements.
Args:
rules: rules for which we want facts to be emitted
"""
for requirement_grp_id, rule in enumerate(rules):
virtual = rule.kind == RequirementKind.VIRTUAL
pkg_name, policy, requirement_grp = rule.pkg_name, rule.policy, rule.requirements
requirement_weight = 0
# Propagated preferences have a higher penalty that normal preferences
weight_multiplier = 2 if rule.origin == RequirementOrigin.INPUT_SPECS else 1
# Write explicitly if a requirement is conditional or not
if rule.condition != EMPTY_SPEC:
msg = f"activate requirement {requirement_grp_id} if {rule.condition} holds"
context = ConditionContext()
context.transform_required = dag_closure_by_deptype
try:
main_condition_id = self.condition(
rule.condition, required_name=pkg_name, msg=msg, context=context
)
except Exception as e:
if rule.kind != RequirementKind.DEFAULT:
raise RuntimeError(
"cannot emit requirements for the solver: " + str(e)
) from e
continue
self.gen.fact(
fn.requirement_conditional(pkg_name, requirement_grp_id, main_condition_id)
)
self.gen.fact(fn.requirement_group(pkg_name, requirement_grp_id))
self.gen.fact(fn.requirement_policy(pkg_name, requirement_grp_id, policy))
if rule.message:
self.gen.fact(fn.requirement_message(pkg_name, requirement_grp_id, rule.message))
self.gen.newline()
for input_spec in requirement_grp:
spec = spack.spec.Spec(input_spec)
spec.replace_hash()
if not spec.name:
spec.name = pkg_name
spec.attach_git_version_lookup()
when_spec = spec
if virtual and spec.name != pkg_name:
when_spec = spack.spec.Spec(f"^[virtuals={pkg_name}] {spec}")
try:
context = ConditionContext()
context.source = ConstraintOrigin.append_type_suffix(
pkg_name, ConstraintOrigin.REQUIRE
)
context.wrap_node_requirement = True
if not virtual:
context.transform_required = remove_facts("depends_on")
context.transform_imposed = remove_facts(
"node", "virtual_node", "depends_on"
)
# else: for virtuals we want to emit "node" and
# "virtual_node" in imposed specs
info_msg = f"{input_spec} is a requirement for package {pkg_name}"
if rule.condition != EMPTY_SPEC:
info_msg += f" when {rule.condition}"
if rule.message:
info_msg += f" ({rule.message})"
member_id = self.condition(
required_spec=when_spec,
imposed_spec=spec,
required_name=pkg_name,
msg=info_msg,
context=context,
)
# Conditions don't handle conditional dependencies directly
# Those are handled separately here
self.generate_conditional_dep_conditions(spec, member_id)
except Exception as e:
# Do not raise if the rule comes from the 'all' subsection, since usability
# would be impaired. If a rule does not apply for a specific package, just
# discard it.
if rule.kind != RequirementKind.DEFAULT:
raise RuntimeError(
"cannot emit requirements for the solver: " + str(e)
) from e
continue
self.gen.fact(fn.requirement_group_member(member_id, pkg_name, requirement_grp_id))
self.gen.fact(
fn.requirement_has_weight(member_id, requirement_weight * weight_multiplier)
)
self.gen.newline()
requirement_weight += 1
def external_packages(self):
"""Facts on external packages, from packages.yaml and implicit externals."""
self.gen.h1("External packages")
packages_yaml = external_config_with_implicit_externals(spack.config.CONFIG)
for pkg_name, data in packages_yaml.items():
if pkg_name == "all":
continue
# This package is not among possible dependencies
if pkg_name not in self.pkgs:
continue
if not data.get("buildable", True):
self.gen.h2(f"External package: {pkg_name}")
self.gen.fact(fn.buildable_false(pkg_name))
def preferred_variants(self, pkg_name):
"""Facts on concretization preferences, as read from packages.yaml"""
preferences = spack.package_prefs.PackagePrefs
preferred_variants = preferences.preferred_variants(pkg_name)
if not preferred_variants:
return
self.gen.h2(f"Package preferences: {pkg_name}")
for variant_name in sorted(preferred_variants):
variant = preferred_variants[variant_name]
# perform validation of the variant and values
try:
variant_defs = vt.prevalidate_variant_value(self.pkg_class(pkg_name), variant)
except (vt.InvalidVariantValueError, KeyError, ValueError) as e:
tty.debug(
f"[SETUP]: rejected {str(variant)} as a preference for {pkg_name}: {str(e)}"
)
continue
for value in variant.values:
for variant_def in variant_defs:
self.variant_values_from_specs.add((pkg_name, id(variant_def), value))
self.gen.fact(
fn.variant_default_value_from_packages_yaml(pkg_name, variant.name, value)
)
def target_preferences(self):
key_fn = spack.package_prefs.PackagePrefs("all", "target")
if not self.target_specs_cache:
self.target_specs_cache = [
spack.spec.Spec("target={0}".format(target_name))
for _, target_name in self.default_targets
]
package_targets = self.target_specs_cache[:]
package_targets.sort(key=key_fn)
for i, preferred in enumerate(package_targets):
self.gen.fact(fn.target_weight(str(preferred.architecture.target), i))
def spec_clauses(
self,
spec: spack.spec.Spec,
*,
body: bool = False,
transitive: bool = True,
expand_hashes: bool = False,
concrete_build_deps=False,
include_runtimes=False,
required_from: Optional[str] = None,
context: Optional[SourceContext] = None,
) -> List[AspFunction]:
"""Wrap a call to ``_spec_clauses()`` into a try/except block with better error handling.
Arguments are as for ``_spec_clauses()`` except ``required_from``.
Arguments:
required_from: name of package that caused this call.
"""
try:
clauses = self._spec_clauses(
spec,
body=body,
transitive=transitive,
expand_hashes=expand_hashes,
concrete_build_deps=concrete_build_deps,
include_runtimes=include_runtimes,
context=context,
)
except RuntimeError as exc:
msg = str(exc)
if required_from:
msg += f" [required from package '{required_from}']"
raise RuntimeError(msg)
return clauses
def _spec_clauses(
    self,
    spec: spack.spec.Spec,
    *,
    body: bool = False,
    transitive: bool = True,
    expand_hashes: bool = False,
    concrete_build_deps: bool = False,
    include_runtimes: bool = False,
    context: Optional[SourceContext] = None,
    seen: Optional[Set[int]] = None,
) -> List[AspFunction]:
    """Return a list of clauses for a spec mandates are true.

    Arguments:
        spec: the spec to analyze
        body: if True, generate clauses to be used in rule bodies (final values) instead
            of rule heads (setters).
        transitive: if False, don't generate clauses from dependencies (default True)
        expand_hashes: if True, descend into hashes of concrete specs (default False)
        concrete_build_deps: if False, do not include pure build deps of concrete specs
            (as they have no effect on runtime constraints)
        include_runtimes: generate full dependency clauses from runtime libraries that
            are ommitted from the solve.
        context: tracks what constraint this clause set is generated for (e.g. a
            ``depends_on`` constraint in a package.py file)
        seen: set of ids of specs that have already been processed (for internal use only)

    Normally, if called with ``transitive=True``, ``spec_clauses()`` just generates
    hashes for the dependency requirements of concrete specs. If ``expand_hashes``
    is ``True``, we'll *also* output all the facts implied by transitive hashes,
    which are redundant during a solve but useful outside of one (e.g.,
    for spec ``diff``).
    """
    clauses = []

    # ``seen`` guards against revisiting the same node when recursing into deps
    seen = seen if seen is not None else set()
    seen.add(id(spec))

    # _Head wraps attrs for rule heads (setters), _Body for rule bodies (tests)
    f: Union[Type[_Head], Type[_Body]] = _Body if body else _Head

    if spec.name:
        clauses.append(
            f.node(spec.name)
            if not spack.repo.PATH.is_virtual(spec.name)
            else f.virtual_node(spec.name)
        )
    if spec.namespace:
        clauses.append(f.namespace(spec.name, spec.namespace))

    clauses.extend(self.spec_versions(spec))

    # seed architecture at the root (we'll propagate later)
    # TODO: use better semantics.
    arch = spec.architecture
    if arch:
        if arch.platform:
            clauses.append(f.node_platform(spec.name, arch.platform))
        if arch.os:
            clauses.append(f.node_os(spec.name, arch.os))
        if arch.target:
            clauses.extend(self.target_ranges(spec, f.node_target))

    # variants
    for vname, variant in sorted(spec.variants.items()):
        # TODO: variant="*" means 'variant is defined to something', which used to
        # be meaningless in concretization, as all variants had to be defined. But
        # now that variants can be conditional, it should force a variant to exist.
        if not variant.values:
            continue

        for value in variant.values:
            # ensure that the value *can* be valid for the spec
            if spec.name and not spec.concrete and not spack.repo.PATH.is_virtual(spec.name):
                variant_defs = vt.prevalidate_variant_value(
                    self.pkg_class(spec.name), variant, spec
                )

                # Record that that this is a valid possible value. Accounts for
                # int/str/etc., where valid values can't be listed in the package
                for variant_def in variant_defs:
                    self.variant_values_from_specs.add((spec.name, id(variant_def), value))

            if variant.propagate:
                clauses.append(f.propagate(spec.name, fn.variant_value(vname, value)))
                # only set the value on this node if the package declares the variant
                if self.pkg_class(spec.name).has_variant(vname):
                    clauses.append(f.variant_value(spec.name, vname, value))
            else:
                variant_clause = f.variant_value(spec.name, vname, value)
                if (
                    variant.concrete
                    and variant.type == vt.VariantType.MULTI
                    and not spec.concrete
                ):
                    if body is False:
                        # rewrite the head attr name to its "concrete_" form
                        variant_clause.args = (
                            f"concrete_{variant_clause.args[0]}",
                            *variant_clause.args[1:],
                        )
                    else:
                        clauses.append(
                            fn.attr("concrete_variant_request", spec.name, vname, value)
                        )
                clauses.append(variant_clause)

    # compiler flags
    source = context.source if context else "none"
    for flag_type, flags in spec.compiler_flags.items():
        flag_group = " ".join(flags)
        for flag in flags:
            clauses.append(
                f.node_flag(spec.name, fn.node_flag(flag_type, flag, flag_group, source))
            )
            if not spec.concrete and flag.propagate is True:
                clauses.append(
                    f.propagate(
                        spec.name,
                        fn.node_flag(flag_type, flag, flag_group, source),
                        fn.edge_types("link", "run"),
                    )
                )

    # Hash for concrete specs
    if spec.concrete:
        # older specs do not have package hashes, so we have to do this carefully
        package_hash = getattr(spec, "_package_hash", None)
        if package_hash:
            clauses.append(fn.attr("package_hash", spec.name, package_hash))
        clauses.append(fn.attr("hash", spec.name, spec.dag_hash()))

    if spec.external:
        clauses.append(fn.attr("external", spec.name))

    # virtuals provided on incoming edges (deduplicated and sorted for determinism)
    edges = spec.edges_from_dependents()
    virtuals = sorted(
        {x for x in itertools.chain.from_iterable([edge.virtuals for edge in edges])}
    )
    if not body and not spec.concrete:
        for virtual in virtuals:
            clauses.append(fn.attr("provider_set", spec.name, virtual))
            clauses.append(fn.attr("virtual_node", virtual))
    else:
        for virtual in virtuals:
            clauses.append(fn.attr("virtual_on_incoming_edges", spec.name, virtual))

    # If the spec is external and concrete, we allow all the libcs on the system
    if spec.external and spec.concrete and using_libc_compatibility():
        clauses.append(fn.attr("needs_libc", spec.name))
        for libc in self.libcs:
            clauses.append(fn.attr("compatible_libc", spec.name, libc.name, libc.version))

    if not transitive:
        return clauses

    # Dependencies
    edge_clauses = []
    for dspec in spec.edges_to_dependencies():
        # Ignore conditional dependencies, they are handled by caller
        if dspec.when != EMPTY_SPEC:
            continue

        dep = dspec.spec

        if spec.concrete:
            # GCC runtime is solved again by clingo, even on concrete specs, to give
            # the possibility to reuse specs built against a different runtime.
            if dep.name == "gcc-runtime":
                edge_clauses.append(
                    fn.attr("compatible_runtime", spec.name, dep.name, f"{dep.version}:")
                )
                constraint_spec = spack.spec.Spec(f"{dep.name}@{dep.version}")
                self.spec_versions(constraint_spec)
                if not include_runtimes:
                    continue

            # libc is also solved again by clingo, but in this case the compatibility
            # is not encoded in the parent node - so we need to emit explicit facts
            if "libc" in dspec.virtuals:
                edge_clauses.append(fn.attr("needs_libc", spec.name))
                for libc in self.libcs:
                    if libc_is_compatible(libc, dep):
                        edge_clauses.append(
                            fn.attr("compatible_libc", spec.name, libc.name, libc.version)
                        )
                if not include_runtimes:
                    continue

            # We know dependencies are real for concrete specs. For abstract
            # specs they just mean the dep is somehow in the DAG.
            for dtype in dt.ALL_FLAGS:
                if not dspec.depflag & dtype:
                    continue
                # skip build dependencies of already-installed specs
                if concrete_build_deps or dtype != dt.BUILD:
                    edge_clauses.append(
                        fn.attr("depends_on", spec.name, dep.name, dt.flag_to_string(dtype))
                    )
                    for virtual_name in dspec.virtuals:
                        edge_clauses.append(
                            fn.attr("virtual_on_edge", spec.name, dep.name, virtual_name)
                        )
                        edge_clauses.append(fn.attr("virtual_node", virtual_name))

            # imposing hash constraints for all but pure build deps of
            # already-installed concrete specs.
            if concrete_build_deps or dspec.depflag != dt.BUILD:
                edge_clauses.append(fn.attr("hash", dep.name, dep.dag_hash()))
            elif not concrete_build_deps and dspec.depflag:
                edge_clauses.append(
                    fn.attr("concrete_build_dependency", spec.name, dep.name, dep.dag_hash())
                )
                for virtual_name in dspec.virtuals:
                    edge_clauses.append(
                        fn.attr("virtual_on_build_edge", spec.name, dep.name, virtual_name)
                    )

        # if the spec is abstract, descend into dependencies.
        # if it's concrete, then the hashes above take care of dependency
        # constraints, but expand the hashes if asked for.
        if (not spec.concrete or expand_hashes) and id(dep) not in seen:
            dependency_clauses = self._spec_clauses(
                dep,
                body=body,
                expand_hashes=expand_hashes,
                concrete_build_deps=concrete_build_deps,
                context=context,
                seen=seen,
            )

            ###
            # Dependency expressed with "^"
            ###
            if not dspec.direct:
                edge_clauses.extend(dependency_clauses)
                continue

            ###
            # Direct dependencies expressed with "%"
            ###
            for dependency_type in dt.flag_to_tuple(dspec.depflag):
                edge_clauses.append(
                    fn.attr("depends_on", spec.name, dep.name, dependency_type)
                )

            # By default, wrap head of rules, unless the context says otherwise
            wrap_node_requirement = body is False
            if context and context.wrap_node_requirement is not None:
                wrap_node_requirement = context.wrap_node_requirement

            if not wrap_node_requirement:
                edge_clauses.extend(dependency_clauses)
                continue

            # wrap each clause of the dependency as a node_requirement
            for clause in dependency_clauses:
                clause.name = "node_requirement"
                edge_clauses.append(fn.attr("direct_dependency", spec.name, clause))

    clauses.extend(edge_clauses)
    return clauses
def define_package_versions_and_validate_preferences(
    self, possible_pkgs: Set[str], *, require_checksum: bool, allow_deprecated: bool
):
    """Declare any versions in specs not declared in packages.

    Populates ``self.possible_versions`` from each package.py and from any
    ``packages.yaml`` version preferences, and records deprecated versions.

    Raises:
        spack.error.ConfigError: if a packages.yaml version preference matches
            no known version of the package.
    """
    packages_yaml = spack.config.CONFIG.get_config("packages")
    for pkg_name in sorted(possible_pkgs):
        pkg_cls = self.pkg_class(pkg_name)

        # All the versions from the corresponding package.py file. Since concepts
        # like being a "develop" version or being preferred exist only at a
        # package.py level, sort them in this partial list here
        from_package_py = list(pkg_cls.versions.items())

        if require_checksum and pkg_cls.has_code:
            from_package_py = [x for x in from_package_py if _is_checksummed_version(x)]

        for v, version_info in from_package_py:
            if version_info.get("deprecated", False):
                self.deprecated_versions[pkg_name].add(v)
                if not allow_deprecated:
                    continue
            self.possible_versions[pkg_name][v].append(Provenance.PACKAGE_PY)

        if pkg_name not in packages_yaml or "version" not in packages_yaml[pkg_name]:
            continue

        # TODO(psakiev) Need facts about versions
        # - requires_commit (associated with tag or branch)
        from_packages_yaml: List[GitOrStandardVersion] = []
        for vstr in packages_yaml[pkg_name]["version"]:
            v = vn.ver(vstr)

            if isinstance(v, vn.GitVersion):
                if not require_checksum or v.is_commit:
                    from_packages_yaml.append(v)
            else:
                # expand a (possibly range) preference into matching known versions
                matches = [x for x in self.possible_versions[pkg_name] if x.satisfies(v)]
                matches.sort(reverse=True)
                if not matches:
                    raise spack.error.ConfigError(
                        f"Preference for version {v} does not match any known "
                        f"version of {pkg_name}"
                    )
                from_packages_yaml.extend(matches)

        # dedupe while preserving order, then record provenance
        from_packages_yaml = list(spack.llnl.util.lang.dedupe(from_packages_yaml))
        for v in from_packages_yaml:
            self.possible_versions[pkg_name][v].append(Provenance.PACKAGES_YAML)
        self.versions_from_yaml[pkg_name] = from_packages_yaml
def define_ad_hoc_versions_from_specs(
    self, specs, origin, *, allow_deprecated: bool, require_checksum: bool
):
    """Add concrete versions to possible versions from lists of CLI/dev specs.

    Raises:
        UnsatisfiableSpecError: if checksums are required and a version has none.
    """
    for s in traverse.traverse_nodes(specs):
        # If there is a concrete version on the CLI *that we know nothing
        # about*, add it to the known versions.
        version = s.versions.concrete
        if version is None or (any((v == version) for v in self.possible_versions[s.name])):
            continue

        if require_checksum and not _is_checksummed_git_version(version):
            raise UnsatisfiableSpecError(
                s.format("No matching version for constraint {name}{@versions}")
            )

        if not allow_deprecated and version in self.deprecated_versions[s.name]:
            continue

        self.possible_versions[s.name][version].append(origin)
def _supported_targets(self, compiler_name, compiler_version, targets):
"""Get a list of which targets are supported by the compiler.
Results are ordered most to least recent.
"""
supported, unsupported = [], []
for target in targets:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
target.optimization_flags(
compiler_name, compiler_version.dotted_numeric_string
)
supported.append(target)
except spack.vendor.archspec.cpu.UnsupportedMicroarchitecture:
unsupported.append(target)
except ValueError:
unsupported.append(target)
return supported, unsupported
def platform_defaults(self):
    """Emit facts declaring the host platform as default and allowed."""
    self.gen.h2("Default platform")
    host_platform = spack.platforms.host()
    self.gen.fact(fn.node_platform_default(host_platform))
    self.gen.fact(fn.allowed_platform(host_platform))
def os_defaults(self, specs):
    """Emit facts for the possible operating systems and their preference order."""
    self.gen.h2("Possible operating systems")
    platform = spack.platforms.host()

    # create set of OS's to consider
    buildable = set(platform.operating_sys.keys())

    # Consider any OS's mentioned on the command line. We need this to
    # cross-concretize in CI, and for some tests.
    # TODO: OS should really be more than just a label -- rework this.
    for spec in specs:
        if spec.architecture and spec.architecture.os:
            buildable.add(spec.architecture.os)

    # make directives for buildable OS's
    for build_os in sorted(buildable):
        self.gen.fact(fn.buildable_os(build_os))

    def keyfun(os):
        return (
            os == platform.default_os,  # prefer default
            os not in buildable,  # then prefer buildables
            os,  # then sort by name
        )

    # NOTE(review): with reverse=True, `os not in buildable` ranks NON-buildable
    # OS's before buildable ones among non-defaults, which seems to contradict
    # the "then prefer buildables" comment above -- confirm intended semantics
    # of the index emitted in fn.os(os_name, i).
    all_oses = buildable.union(self.possible_oses)
    ordered_oses = sorted(all_oses, key=keyfun, reverse=True)

    # output the preference order of OS's for the concretizer to choose
    for i, os_name in enumerate(ordered_oses):
        self.gen.fact(fn.os(os_name, i))
def target_defaults(self, specs):
    """Add facts about targets and target compatibility."""
    self.gen.h2("Target compatibility")

    # Add targets explicitly requested from specs
    candidate_targets = []
    for x in self.possible_graph.candidate_targets():
        # drop targets that no possible package could ever use
        if all(
            self.possible_graph.unreachable(pkg_name=pkg_name, when_spec=f"target={x}")
            for pkg_name in self.pkgs
        ):
            tty.debug(f"[{__name__}] excluding target={x}, cause no package can use it")
            continue
        candidate_targets.append(x)

    host_compatible = spack.config.CONFIG.get("concretizer:targets:host_compatible")
    for spec in specs:
        if not spec.architecture or not spec.architecture.target:
            continue

        target = spack.vendor.archspec.cpu.TARGETS.get(spec.target.name)
        if not target:
            # unknown target name: fall back to range handling
            self.target_ranges(spec, None)
            continue

        if target not in candidate_targets and not host_compatible:
            candidate_targets.append(target)
            # ancestors of a requested target are also runnable candidates
            for ancestor in target.ancestors:
                if ancestor not in candidate_targets:
                    candidate_targets.append(ancestor)

    platform = spack.platforms.host()
    uarch = spack.vendor.archspec.cpu.TARGETS.get(platform.default)
    best_targets = {uarch.family.name}
    for compiler in self.possible_compilers:
        supported, unsupported = self._supported_targets(
            compiler.name, compiler.version, candidate_targets
        )

        for target in supported:
            best_targets.add(target.name)
            self.gen.fact(fn.target_supported(compiler.name, compiler.version, target.name))

        # any support at all implies support for the generic host family
        if supported:
            self.gen.fact(
                fn.target_supported(compiler.name, compiler.version, uarch.family.name)
            )

        for target in unsupported:
            self.gen.fact(
                fn.target_not_supported(compiler.name, compiler.version, target.name)
            )
        self.gen.newline()

    i = 0
    for target in candidate_targets:
        self.gen.fact(fn.target(target.name))
        self.gen.fact(fn.target_family(target.name, target.family.name))
        self.gen.fact(fn.target_compatible(target.name, target.name))
        # Code for ancestor can run on target
        for ancestor in target.ancestors:
            self.gen.fact(fn.target_compatible(target.name, ancestor.name))

        # prefer best possible targets; weight others poorly so
        # they're not used unless set explicitly
        # these are stored to be generated as facts later offset by the
        # number of preferred targets
        if target.name in best_targets:
            self.default_targets.append((i, target.name))
            i += 1
        else:
            self.default_targets.append((100, target.name))
        self.gen.newline()

    self.default_targets = list(sorted(set(self.default_targets)))
    self.target_preferences()
def define_version_constraints(self):
    """Define what version_satisfies(...) means in ASP logic."""
    # emit commit-related facts for git-based versions
    for pkg_name, versions in self.possible_versions.items():
        for v in versions:
            if v in self.git_commit_versions[pkg_name]:
                sha = self.git_commit_versions[pkg_name].get(v)
                if sha:
                    self.gen.fact(fn.pkg_fact(pkg_name, fn.version_has_commit(v, sha)))
                else:
                    self.gen.fact(fn.pkg_fact(pkg_name, fn.version_needs_commit(v)))
    self.gen.newline()

    for pkg_name, versions in self.version_constraints:
        # generate facts for each package constraint and the version
        # that satisfies it
        for v in self.possible_versions[pkg_name]:
            if v.satisfies(versions):
                self.gen.fact(fn.pkg_fact(pkg_name, fn.version_satisfies(versions, v)))

    self.gen.newline()
def collect_virtual_constraints(self):
    """Define versions for constraints on virtuals.

    Must be called before define_version_constraints().
    """
    # aggregate constraints into per-virtual sets
    constraint_map = collections.defaultdict(lambda: set())
    for pkg_name, versions in self.version_constraints:
        if not spack.repo.PATH.is_virtual(pkg_name):
            continue
        constraint_map[pkg_name].add(versions)

    # extract all the real versions mentioned in version ranges
    def versions_for(v):
        if isinstance(v, vn.StandardVersion):
            return [v]
        elif isinstance(v, vn.ClosedOpenRange):
            # represent a half-open range by its two inclusive endpoints
            return [v.lo, vn._prev_version(v.hi)]
        elif isinstance(v, vn.VersionList):
            return sum((versions_for(e) for e in v), [])
        else:
            raise TypeError(f"expected version type, found: {type(v)}")

    # define a set of synthetic possible versions for virtuals, so
    # that `version_satisfies(Package, Constraint, Version)` has the
    # same semantics for virtuals as for regular packages.
    for pkg_name, versions in sorted(constraint_map.items()):
        possible_versions = set(sum([versions_for(v) for v in versions], []))
        for version in sorted(possible_versions):
            self.possible_versions[pkg_name][version].append(Provenance.VIRTUAL_CONSTRAINT)
def define_compiler_version_constraints(self):
    """Emit facts pairing each compiler version constraint with matching compilers."""
    for version_constraint in sorted(self.compiler_version_constraints):
        for idx, candidate in enumerate(self.possible_compilers):
            if candidate.spec.satisfies(version_constraint):
                self.gen.fact(
                    fn.compiler_version_satisfies(
                        version_constraint.name, version_constraint.versions, idx
                    )
                )
    self.gen.newline()
def define_target_constraints(self):
    """Emit target_satisfies facts for every target constraint seen in the solve."""

    def _all_targets_satisfiying(single_constraint):
        allowed_targets = []

        # a bare target name (no range) satisfies only itself
        if ":" not in single_constraint:
            return [single_constraint]

        t_min, _, t_max = single_constraint.partition(":")
        for test_target in spack.vendor.archspec.cpu.TARGETS.values():
            # Check lower bound
            if t_min and not t_min <= test_target:
                continue

            # Check upper bound
            if t_max and not t_max >= test_target:
                continue

            allowed_targets.append(test_target)
        return allowed_targets

    # memoize range expansion per sub-constraint string
    cache = {}
    for target_constraint in sorted(self.target_constraints, key=lambda x: x.name):
        # Construct the list of allowed targets for this constraint
        allowed_targets = []
        for single_constraint in str(target_constraint).split(","):
            if single_constraint not in cache:
                cache[single_constraint] = _all_targets_satisfiying(single_constraint)
            allowed_targets.extend(cache[single_constraint])

        for target in allowed_targets:
            self.gen.fact(fn.target_satisfies(target_constraint, target))
        self.gen.newline()
def define_variant_values(self):
    """Validate variant values from the command line.

    Add valid variant values from the command line to the possible values for
    variant definitions.
    """
    # for determinism, sort by variant ids, not variant def ids (which are object ids)
    def_info = []
    for pkg_name, variant_def_id, value in sorted(self.variant_values_from_specs):
        try:
            vid = self.variant_ids_by_def_id[variant_def_id]
        except KeyError:
            # unknown definition id: skip the value rather than fail the solve
            tty.debug(
                f"[{__name__}] cannot retrieve id of the {value} variant from {pkg_name}"
            )
            continue
        def_info.append((pkg_name, vid, value))

    # Tell the concretizer about possible values from specs seen in spec_clauses().
    for pkg_name, vid, value in sorted(def_info):
        self.gen.fact(fn.pkg_fact(pkg_name, fn.variant_possible_value(vid, value)))
def register_concrete_spec(self, spec, possible):
    """Record an installed spec as reusable if its package can appear in the solve."""
    # tell the solver about any installed packages that could
    # be dependencies (don't tell it about the others)
    if spec.name not in possible:
        return

    try:
        # Only consider installed packages for repo we know
        spack.repo.PATH.get(spec)
    except (spack.repo.UnknownNamespaceError, spack.repo.UnknownPackageError) as err:
        tty.debug(f"[REUSE] Issues when trying to reuse {spec.short_spec}: {str(err)}")
        return

    self.reusable_and_possible.add(spec)
def concrete_specs(self):
    """Emit facts for reusable specs"""
    for h, spec in self.reusable_and_possible.explicit_items():
        # this indicates that there is a spec like this installed
        self.gen.fact(fn.installed_hash(spec.name, h))
        # indirection layer between hash constraints and imposition to allow for splicing
        for pred in self.spec_clauses(spec, body=True, required_from=None):
            self.gen.fact(fn.hash_attr(h, *pred.args))
        self.gen.newline()
        # Declare as possible parts of specs that are not in package.py
        # - Add versions to possible versions
        # - Add OS to possible OS's
        for dep in spec.traverse():
            provenance = Provenance.INSTALLED
            if isinstance(dep.version, vn.GitVersion):
                provenance = Provenance.INSTALLED_GIT_VERSION
            self.possible_versions[dep.name][dep.version].append(provenance)
            self.possible_oses.add(dep.os)
def define_concrete_input_specs(self, specs, possible):
    """Register every concrete spec found among the input specs as reusable."""
    for root in specs:
        for node in root.traverse():
            if node.concrete:
                self.register_concrete_spec(node, possible)
def setup(
    self,
    specs: Sequence[spack.spec.Spec],
    *,
    reuse: Optional[List[spack.spec.Spec]] = None,
    allow_deprecated: bool = False,
    _use_unsat_cores: bool = True,
) -> "ProblemInstanceBuilder":
    """Generate an ASP program with relevant constraints for specs.

    This calls methods on the solve driver to set up the problem with
    facts and rules from all possible dependencies of the input
    specs, as well as constraints from the specs themselves.

    Arguments:
        specs: list of Specs to solve
        reuse: list of concrete specs that can be reused
        allow_deprecated: if True adds deprecated versions into the solve
        _use_unsat_cores: if True, use unsat cores for internal errors

    Return:
        A ProblemInstanceBuilder populated with facts and rules for an ASP solve.
    """
    reuse = reuse or []
    check_packages_exist(specs)
    self.gen = ProblemInstanceBuilder()

    # Get compilers from buildcaches only if injected through "reuse" specs
    supported_compilers = spack.compilers.config.supported_compilers()
    compilers_from_reuse = {
        x for x in reuse if x.name in supported_compilers and not x.external
    }
    candidate_compilers, self.rejected_compilers = possible_compilers(
        configuration=spack.config.CONFIG
    )
    # make non-external candidate compilers (and their run closure) reusable
    for x in candidate_compilers:
        if x.external or x in reuse:
            continue
        reuse.append(x)
        for dep in x.traverse(root=False, deptype="run"):
            reuse.extend(dep.traverse(deptype=("link", "run")))

    candidate_compilers.update(compilers_from_reuse)
    self.possible_compilers = list(candidate_compilers)
    # TODO: warning is because mypy doesn't know Spec supports rich comparison via decorator
    self.possible_compilers.sort()  # type: ignore[call-arg,call-overload]
    self.compiler_mixing()

    self.gen.h1("Runtimes")
    injected_dependencies = self.define_runtime_constraints()

    node_counter = create_counter(
        list(specs) + injected_dependencies,
        tests=self.tests,
        possible_graph=self.possible_graph,
    )
    self.possible_virtuals = node_counter.possible_virtuals()
    self.pkgs = node_counter.possible_dependencies()
    self.libcs = sorted(all_libcs())  # type: ignore[type-var]

    # record namespaces explicitly requested on input specs
    for node in traverse.traverse_nodes(specs):
        if node.namespace is not None:
            self.explicitly_required_namespaces[node.name] = node.namespace

    self.requirement_parser.parse_rules_from_input_specs(specs)

    self.gen.h1("Generic information")
    if using_libc_compatibility():
        for libc in self.libcs:
            self.gen.fact(fn.host_libc(libc.name, libc.version))

    if not allow_deprecated:
        self.gen.fact(fn.deprecated_versions_not_allowed())
    self.gen.newline()

    for pkg_name in spack.compilers.config.supported_compilers():
        self.gen.fact(fn.compiler_package(pkg_name))

    # Calculate develop specs
    # they will be used in addition to command line specs
    # in determining known versions/targets/os
    dev_specs: Tuple[spack.spec.Spec, ...] = ()
    env = ev.active_environment()
    if env:
        dev_specs = tuple(
            spack.spec.Spec(info["spec"]).constrained(
                'dev_path="%s"'
                % spack.util.path.canonicalize_path(info["path"], default_wd=env.path)
            )
            for name, info in env.dev_specs.items()
        )

    specs = tuple(specs)  # ensure compatible types to add

    self.gen.h1("Reusable concrete specs")
    self.define_concrete_input_specs(specs, self.pkgs)
    if reuse:
        self.gen.fact(fn.optimize_for_reuse())
        for reusable_spec in reuse:
            self.register_concrete_spec(reusable_spec, self.pkgs)
    self.concrete_specs()

    self.gen.h1("Generic statements on possible packages")
    node_counter.possible_packages_facts(self.gen, fn)

    self.gen.h1("Possible flags on nodes")
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        self.gen.fact(fn.flag_type(flag))
    self.gen.newline()

    self.gen.h1("General Constraints")
    self.config_compatible_os()

    # architecture defaults
    self.platform_defaults()
    self.os_defaults(specs + dev_specs)
    self.target_defaults(specs + dev_specs)

    self.virtual_requirements_and_weights()
    self.external_packages()

    # TODO: make a config option for this undocumented feature
    checksummed = "SPACK_CONCRETIZER_REQUIRE_CHECKSUM" in os.environ
    self.define_package_versions_and_validate_preferences(
        self.pkgs, allow_deprecated=allow_deprecated, require_checksum=checksummed
    )
    self.define_ad_hoc_versions_from_specs(
        specs, Provenance.SPEC, allow_deprecated=allow_deprecated, require_checksum=checksummed
    )
    self.define_ad_hoc_versions_from_specs(
        dev_specs,
        Provenance.DEV_SPEC,
        allow_deprecated=allow_deprecated,
        require_checksum=checksummed,
    )
    self.validate_and_define_versions_from_requirements(
        allow_deprecated=allow_deprecated, require_checksum=checksummed
    )

    self.gen.h1("Package Constraints")
    for pkg in sorted(self.pkgs):
        self.gen.h2(f"Package rules: {pkg}")
        self.pkg_rules(pkg, tests=self.tests)
        self.preferred_variants(pkg)

    self.gen.h1("Special variants")
    self.define_auto_variant("dev_path", multi=False)
    self.define_auto_variant("commit", multi=False)
    self.define_auto_variant("patches", multi=True)

    self.gen.h1("Develop specs")
    # Inject dev_path from environment
    for ds in dev_specs:
        self.condition(spack.spec.Spec(ds.name), ds, msg=f"{ds.name} is a develop spec")
        self.trigger_rules()
        self.effect_rules()

    self.gen.h1("Spec Constraints")
    self.literal_specs(specs)

    self.gen.h1("Variant Values defined in specs")
    self.define_variant_values()

    self.gen.h1("Version Constraints")
    self.collect_virtual_constraints()
    self.define_version_constraints()

    self.gen.h1("Compiler Version Constraints")
    self.define_compiler_version_constraints()

    self.gen.h1("Target Constraints")
    self.define_target_constraints()

    self.gen.h1("Internal errors")
    self.internal_errors(_use_unsat_cores=_use_unsat_cores)

    return self.gen
def compiler_mixing(self):
    """Emit facts disabling compiler mixing unless the configuration allows it."""
    mixing_config = spack.config.get("concretizer:compiler_mixing", True)
    if mixing_config is True:
        return

    # anything besides should_mix: true
    for language in ["c", "cxx", "fortran"]:
        self.gen.fact(fn.no_compiler_mixing(language))

    # user specified an allow-list
    if isinstance(mixing_config, list):
        for pkg_name in mixing_config:
            self.gen.fact(fn.allow_mixing(pkg_name))
def internal_errors(self, *, _use_unsat_cores: bool):
    """Scan concretize.lp for internal_error atoms and wire them into the problem.

    When ``_use_unsat_cores`` is True each internal_error atom becomes a choice
    plus an assumption (so unsat cores can report it); otherwise it is asserted
    as a plain fact.
    """
    parent_dir = os.path.dirname(__file__)

    def visit(node):
        # walk each rule's body looking for internal_error(...) literals
        if ast_type(node) == clingo().ast.ASTType.Rule:
            for term in node.body:
                if ast_type(term) == clingo().ast.ASTType.Literal:
                    if ast_type(term.atom) == clingo().ast.ASTType.SymbolicAtom:
                        name = ast_sym(term.atom).name
                        if name == "internal_error":
                            arg = ast_sym(ast_sym(term.atom).arguments[0])
                            symbol = AspFunction(name)(arg.string)
                            if _use_unsat_cores:
                                self.assumptions.append((parse_term(str(symbol)), True))
                                # choice rule: atom may be true, assumption forces it
                                self.gen.asp_problem.append(f"{{{symbol}}}.")
                            else:
                                self.gen.asp_problem.append(f"{symbol}.")

    path = os.path.join(parent_dir, "concretize.lp")
    parse_files([path], visit)
def define_runtime_constraints(self) -> List[spack.spec.Spec]:
    """Define the constraints to be imposed on the runtimes, and returns a list of
    injected packages.
    """
    recorder = RuntimePropertyRecorder(self)

    for compiler in self.possible_compilers:
        try:
            compiler_cls = spack.repo.PATH.get_pkg_class(compiler.name)
        except spack.repo.UnknownPackageError:
            # no package class: still emit wrapper/libc constraints below
            pass
        else:
            if hasattr(compiler_cls, "runtime_constraints"):
                compiler_cls.runtime_constraints(spec=compiler, pkg=recorder)
            # Inject default flags for compilers
            recorder("*").default_flags(compiler)

        # Add a dependency on the compiler wrapper
        compiler_str = f"{compiler.name} /{compiler.dag_hash()}"
        for language in ("c", "cxx", "fortran"):
            # Using compiler.name causes a bit of duplication, but that is taken care of by
            # clingo during grounding.
            recorder("*").depends_on(
                "compiler-wrapper",
                when=f"%[deptypes=build virtuals={language}] {compiler.name}",
                type="build",
                description=f"Add compiler wrapper when using {compiler.name} for {language}",
            )

        if not using_libc_compatibility():
            continue

        current_libc = None
        if compiler.external or compiler.installed:
            current_libc = CompilerPropertyDetector(compiler).default_libc()
        else:
            try:
                current_libc = compiler["libc"]
            except (KeyError, RuntimeError) as e:
                tty.debug(f"{compiler} cannot determine libc because: {e}")

        if current_libc:
            recorder("*").depends_on(
                "libc",
                when=f"%[deptypes=build] {compiler.name}",
                type="link",
                description=f"Add libc when using {compiler.name}",
            )
            recorder("*").depends_on(
                f"{current_libc.name}@={current_libc.version}",
                when=f"%[deptypes=build] {compiler_str}",
                type="link",
                description=f"Libc is {current_libc} when using {compiler}",
            )

    recorder.consume_facts()
    return sorted(recorder.injected_dependencies)
def literal_specs(self, specs):
    """Emit conditions/triggers/effects for the explicitly requested input specs."""
    for spec in sorted(specs):
        self.gen.h2(f"Spec: {str(spec)}")
        condition_id = next(self._id_counter)
        trigger_id = next(self._id_counter)

        # Special condition triggered by "literal_solved"
        self.gen.fact(fn.literal(trigger_id))
        self.gen.fact(fn.pkg_fact(spec.name, fn.condition_trigger(condition_id, trigger_id)))
        self.gen.fact(fn.condition_reason(condition_id, f"{spec} requested explicitly"))

        imposed_spec_key = str(spec), None
        cache = self._effect_cache[spec.name]
        if imposed_spec_key in cache:
            effect_id, requirements = cache[imposed_spec_key]
        else:
            effect_id = next(self._id_counter)
            context = SourceContext()
            context.source = "literal"
            requirements = self.spec_clauses(spec, context=context)
        root_name = spec.name
        # NOTE: the loop appends to `requirements` while iterating it; the
        # appended "variant_default_value_from_cli" attrs are visited too but
        # match neither branch, so the loop terminates.
        for clause in requirements:
            clause_name = clause.args[0]
            if clause_name == "variant_set":
                requirements.append(
                    fn.attr("variant_default_value_from_cli", *clause.args[1:])
                )
            elif clause_name in ("node", "virtual_node", "hash"):
                # These facts are needed to compute the "condition_set" of the root
                pkg_name = clause.args[1]
                self.gen.fact(fn.mentioned_in_literal(trigger_id, root_name, pkg_name))

        requirements.append(
            fn.attr(
                "virtual_root" if spack.repo.PATH.is_virtual(spec.name) else "root", spec.name
            )
        )
        # conditional deps are handled separately below
        requirements = [x for x in requirements if x.args[0] != "depends_on"]
        cache[imposed_spec_key] = (effect_id, requirements)
        self.gen.fact(fn.pkg_fact(spec.name, fn.condition_effect(condition_id, effect_id)))

        # Create subcondition with any conditional dependencies
        # self.spec_clauses does not do anything with conditional
        # dependencies
        self.generate_conditional_dep_conditions(spec, condition_id)

        if self.concretize_everything:
            self.gen.fact(fn.solve_literal(trigger_id))

    # Trigger rules are needed to allow conditional specs
    self.trigger_rules()
    self.effect_rules()
def generate_conditional_dep_conditions(self, spec: spack.spec.Spec, condition_id: int):
    """Generate a subcondition in the trigger for any conditional dependencies.

    Dependencies are always modeled by a condition. For conditional dependencies,
    the when-spec is added as a subcondition of the trigger to ensure the dependency
    is only activated when the subcondition holds.
    """
    for dspec in spec.traverse_edges():
        # Ignore unconditional deps
        if dspec.when == EMPTY_SPEC:
            continue

        # Cannot use "virtual_node" attr as key for condition
        # because reused specs do not track virtual nodes.
        # Instead, track whether the parent uses the virtual
        def virtual_handler(input_spec, requirements):
            ret = remove_facts("virtual_node")(input_spec, requirements)
            for edge in input_spec.traverse_edges(root=False, cover="edges"):
                if spack.repo.PATH.is_virtual(edge.spec.name):
                    ret.append(fn.attr("uses_virtual", edge.parent.name, edge.spec.name))
            return ret

        context = ConditionContext()
        context.source = ConstraintOrigin.append_type_suffix(
            dspec.parent.name, ConstraintOrigin.CONDITIONAL_SPEC
        )
        # Default is to remove node-like attrs, override here
        context.transform_required = virtual_handler
        context.transform_imposed = lambda x, y: y

        try:
            subcondition_id = self.condition(
                dspec.when,
                spack.spec.Spec(dspec.format(unconditional=True)),
                required_name=dspec.parent.name,
                context=context,
                msg=f"Conditional dependency in ^[when={dspec.when}]{dspec.spec}",
            )
            self.gen.fact(fn.subcondition(subcondition_id, condition_id))
        except vt.UnknownVariantError as e:
            # A variant in the 'when=' condition can't apply to the parent of the edge
            tty.debug(f"[{__name__}] cannot emit subcondition for {dspec.format()}: {e}")
def validate_and_define_versions_from_requirements(
    self, *, allow_deprecated: bool, require_checksum: bool
):
    """If package requirements mention concrete versions that are not mentioned
    elsewhere, then we need to collect those to mark them as possible
    versions. If they are abstract and statically have no match, then we
    need to throw an error. This function assumes all possible versions are already
    registered in self.possible_versions.

    Args:
        allow_deprecated: if False, skip versions recorded in self.deprecated_versions
        require_checksum: if True, only define versions that pass the
            checksummed-git-version check

    Raises:
        spack.error.ConfigError: if an abstract version requirement cannot match
            any known version of the package
    """
    for pkg_name, d in spack.config.CONFIG.get_config("packages").items():
        # "all" is the wildcard section; only per-package "require" entries matter here.
        if pkg_name == "all" or "require" not in d:
            continue

        for s in traverse.traverse_nodes(self._specs_from_requires(pkg_name, d["require"])):
            name, versions = s.name, s.versions

            # Only consider packages that are part of this solve and that carry
            # a real (non-trivial) version constraint.
            if name not in self.pkgs or versions == spack.version.any_version:
                continue

            s.attach_git_version_lookup()
            v = versions.concrete

            if not v:
                # If the version is not concrete, check it's statically concretizable. If
                # not throw an error, which is just so that users know they need to change
                # their config, instead of getting a hard to decipher concretization error.
                if not any(x for x in self.possible_versions[name] if x.satisfies(versions)):
                    raise spack.error.ConfigError(
                        f"Version requirement {versions} on {pkg_name} for {name} "
                        f"cannot match any known version from package.py or externals"
                    )
                continue

            # Already known from package.py or externals: nothing new to define.
            if v in self.possible_versions[name]:
                continue

            # Skip deprecated versions unless explicitly allowed.
            if not allow_deprecated and v in self.deprecated_versions[name]:
                continue

            # If concrete and not yet defined, conditionally define it, like we do for
            # specs from the command line.
            if not require_checksum or _is_checksummed_git_version(v):
                self.possible_versions[name][v].append(Provenance.PACKAGE_REQUIREMENT)
def _specs_from_requires(self, pkg_name, section):
    """Yield the specs mentioned by a requirement rule.

    A rule is either a single spec string or a list whose entries are spec
    strings or objects carrying a single "spec" constraint or a list of
    constraints under a "one_of"/"any_of" policy. Unnamed specs default to
    *pkg_name* (presumably via ``_spec_with_default_name`` — see its definition).
    """
    # Normalize the single-string shorthand to a one-entry list so every
    # shape of rule goes through the same loop below.
    entries = [section] if isinstance(section, str) else section
    for entry in entries:
        if isinstance(entry, str):
            yield _spec_with_default_name(entry, pkg_name)
        elif "spec" in entry:
            # Object form with a single "spec" constraint.
            yield _spec_with_default_name(entry["spec"], pkg_name)
        else:
            # Object form with several constraints under a policy key;
            # "one_of" wins when both could be present.
            policy = "one_of" if "one_of" in entry else "any_of"
            for constraint in entry[policy]:
                yield _spec_with_default_name(constraint, pkg_name)
def pkg_class(self, pkg_name: str) -> Type[spack.package_base.PackageBase]:
    """Return the package class for *pkg_name*.

    If the package was explicitly required from a specific namespace, the
    repository lookup is qualified with that namespace.
    """
    if pkg_name in self.explicitly_required_namespaces:
        request = f"{self.explicitly_required_namespaces[pkg_name]}.{pkg_name}"
    else:
        request = pkg_name
    return spack.repo.PATH.get_pkg_class(request)
| SpackSolverSetup |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_level_handler.py | {
"start": 419,
"end": 5648
class ____(ConditionTestCase):
    """Tests for the LEVEL data condition.

    Covers dual-write translation of rule payloads into DataConditions,
    JSON-schema validation of the comparison blob, and evaluation of
    EQUAL / GREATER_OR_EQUAL / LESS_OR_EQUAL matches against an event's level.
    """

    # Condition type under test.
    condition = Condition.LEVEL
    # Rule-style payload used by the dual-write tests; the level is a string
    # here and is expected to be stored as an int after translation.
    payload = {
        "id": LevelCondition.id,
        "match": MatchType.EQUAL,
        "level": "20",
    }

    def setup_group_event_and_job(self) -> None:
        """Rebuild the cached group event and event data from self.event / self.group."""
        self.group_event = self.event.for_group(self.group)
        self.event_data = WorkflowEventData(event=self.group_event, group=self.group)

    def setUp(self) -> None:
        super().setUp()
        # "info" corresponds to numeric level 20 — the value the default condition matches.
        self.event = self.store_event(data={"level": "info"}, project_id=self.project.id)
        self.group = self.create_group(project=self.project)
        self.setup_group_event_and_job()
        self.dc = self.create_data_condition(
            type=self.condition,
            comparison={"match": MatchType.EQUAL, "level": 20},
            condition_result=True,
        )

    def test_dual_write(self) -> None:
        """A LevelCondition payload translates to an equivalent DataCondition."""
        dcg = self.create_data_condition_group()
        dc = self.translate_to_data_condition(self.payload, dcg)

        assert dc.type == self.condition
        # The string "20" from the payload is stored as the integer 20.
        assert dc.comparison == {
            "match": MatchType.EQUAL,
            "level": 20,
        }
        assert dc.condition_result is True
        assert dc.condition_group == dcg

    def test_dual_write_filter(self) -> None:
        """A LevelFilter payload translates the same way as a LevelCondition."""
        self.payload["id"] = LevelFilter.id
        dcg = self.create_data_condition_group()
        dc = self.translate_to_data_condition(self.payload, dcg)

        assert dc.type == self.condition
        assert dc.comparison == {
            "match": MatchType.EQUAL,
            "level": 20,
        }
        assert dc.condition_result is True
        assert dc.condition_group == dcg

    def test_json_schema(self) -> None:
        """A well-formed comparison saves; malformed ones raise ValidationError.

        NOTE(review): ``comparison.update`` merges into the same dict, so keys
        from earlier invalid cases (e.g. "hi") persist into later ones — each
        later save may be rejected for more than one reason.
        """
        self.dc.comparison.update({"match": MatchType.EQUAL, "level": 30})
        self.dc.save()

        # Unknown extra key is rejected.
        self.dc.comparison.update({"hi": "bye"})
        with pytest.raises(ValidationError):
            self.dc.save()

        # Negative level is rejected.
        self.dc.comparison.update({"match": MatchType.EQUAL, "level": -1})
        with pytest.raises(ValidationError):
            self.dc.save()

        # Unrecognized match type is rejected.
        self.dc.comparison.update({"match": "invalid_match", "level": 30})
        with pytest.raises(ValidationError):
            self.dc.save()

        # Non-numeric level is rejected.
        self.dc.comparison.update({"match": MatchType.EQUAL, "level": "invalid_level"})
        with pytest.raises(ValidationError):
            self.dc.save()

        # Extra key alongside otherwise-valid fields is rejected.
        self.dc.comparison.update({"match": MatchType.EQUAL, "level": 30, "hello": "world"})
        with pytest.raises(ValidationError):
            self.dc.save()

    def test_equals(self) -> None:
        """EQUAL passes only on the exact numeric level (event is info == 20)."""
        self.dc.comparison.update({"match": MatchType.EQUAL, "level": 20})
        self.assert_passes(self.dc, self.event_data)

        self.dc.comparison.update({"match": MatchType.EQUAL, "level": 30})
        self.assert_does_not_pass(self.dc, self.event_data)

    def test_greater_than(self) -> None:
        """GREATER_OR_EQUAL passes when the event level (20) is >= the threshold."""
        self.dc.comparison.update({"match": MatchType.GREATER_OR_EQUAL, "level": 40})
        self.assert_does_not_pass(self.dc, self.event_data)

        self.dc.comparison.update({"match": MatchType.GREATER_OR_EQUAL, "level": 20})
        self.assert_passes(self.dc, self.event_data)

        self.dc.comparison.update({"match": MatchType.GREATER_OR_EQUAL, "level": 10})
        self.assert_passes(self.dc, self.event_data)

    def test_less_than(self) -> None:
        """LESS_OR_EQUAL passes when the event level (20) is <= the threshold."""
        self.dc.comparison.update({"match": MatchType.LESS_OR_EQUAL, "level": 40})
        self.assert_passes(self.dc, self.event_data)

        self.dc.comparison.update({"match": MatchType.LESS_OR_EQUAL, "level": 20})
        self.assert_passes(self.dc, self.event_data)

        self.dc.comparison.update({"match": MatchType.LESS_OR_EQUAL, "level": 10})
        self.assert_does_not_pass(self.dc, self.event_data)

    def test_without_tag(self) -> None:
        """An event stored without a level does not match an EQUAL condition."""
        self.event = self.store_event(data={}, project_id=self.project.id)
        self.setup_group_event_and_job()

        self.dc.comparison.update({"match": MatchType.EQUAL, "level": 20})
        self.assert_does_not_pass(self.dc, self.event_data)

    # This simulates the following case:
    # - Rule is setup to accept >= error
    # - error event finishes the save_event task, group has a level of error
    # - warning event finishes the save event, group now has a level of warning
    # - error event starts post_process_group should pass even though the group
    #   has a warning level set
    #
    # Specifically here to make sure the check is properly checking the event's level
    def test_differing_levels(self) -> None:
        """The condition evaluates the event's own level, not the group's."""
        eevent = self.store_event(data={"level": "error"}, project_id=self.project.id)
        wevent = self.store_event(data={"level": "warning"}, project_id=self.project.id)
        assert wevent.event_id != eevent.event_id
        # Both events must land in the same group for the scenario to apply.
        assert eevent.group is not None
        assert wevent.group is not None
        assert wevent.group.id == eevent.group.id

        # Threshold 40 (error): the warning event must not pass, the error event must.
        self.dc.comparison.update({"match": MatchType.GREATER_OR_EQUAL, "level": 40})
        self.event = wevent
        self.setup_group_event_and_job()
        self.assert_does_not_pass(self.dc, self.event_data)

        self.event = eevent
        self.setup_group_event_and_job()
        self.assert_passes(self.dc, self.event_data)
| TestLevelCondition |
python | ZoranPandovski__al-go-rithms | others/Blockchain/blockchain.py | {
"start": 40,
"end": 361
} | class ____:
def __init__ (self, timestamp, data, previousHash = ' '):
self.timestamp = timestamp
self.data = data
self.previousHash = previousHash
self.hash = self.calculateHash()
def calculateHash(self):
return sha256((str(self.timestamp) + str(self.data) + str(self.previousHash)).encode()).hexdigest()
| block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.