language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 1455,
"end": 1618
} | class ____(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.exclude(text='filtered out')
serializer_class = BasicSerializer
| InstanceView |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 7302,
"end": 7413
} | class ____(StaticAppBase, unittest.TestCase):
package = 'tests.pkgs.static_abspath'
| TestStaticAppUsingAbsPath |
python | wandb__wandb | tests/unit_tests/test_launch/test_builder/test_kaniko.py | {
"start": 736,
"end": 18812
} | class ____(MagicMock):
"""Mock for async functions."""
async def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
@pytest.fixture
def azure_environment(mocker):
"""Fixture for AzureEnvironment class."""
mocker.patch(
"wandb.sdk.launch.environment.azure_environment.DefaultAzureCredential",
MagicMock(),
)
config = {
"environment": {
"type": "azure",
}
}
return AzureEnvironment.from_config(config)
@pytest.fixture
def aws_environment(mocker):
"""Fixture for AwsEnvironment class."""
mocker.patch("wandb.sdk.launch.environment.aws_environment.boto3", MagicMock())
config = {
"type": "aws",
"region": "us-east-1",
}
return AwsEnvironment.from_config(config)
@pytest.fixture
def azure_container_registry(mocker, azure_environment):
"""Fixture for AzureContainerRegistry class."""
mocker.patch(
"wandb.sdk.launch.environment.azure_environment.DefaultAzureCredential",
MagicMock(),
)
config = {
"uri": "https://registry.azurecr.io/test-repo",
}
return AzureContainerRegistry.from_config(config)
@pytest.fixture
def elastic_container_registry(mocker):
"""Fixture for ElasticContainerRegistry class."""
config = {
"uri": "12345678.dkr.ecr.us-east-1.amazonaws.com/test-repo",
}
return ElasticContainerRegistry.from_config(config)
@pytest.mark.asyncio
async def test_kaniko_azure(azure_container_registry, azure_environment):
"""Test that the kaniko builder correctly constructs the job spec for Azure."""
builder = KanikoBuilder(
environment=azure_environment,
registry=azure_container_registry,
build_job_name="test",
build_context_store="https://account.blob.core.windows.net/container/blob",
)
core_client = MagicMock()
core_client.read_namespaced_secret = AsyncMock(return_value=None)
api_client = MagicMock()
job = await builder._create_kaniko_job(
"test-job",
"https://registry.azurecr.io/test-repo",
"12345678",
"https://account.blob.core.windows.net/container/blob",
core_client,
api_client,
)
# Check that the AZURE_STORAGE_ACCESS_KEY env var is set correctly.
assert any(
env_var["name"] == "AZURE_STORAGE_ACCESS_KEY"
for env_var in job["spec"]["template"]["spec"]["containers"][0]["env"]
)
# Check the dockerconfig is mounted and the correct secret + value are used.
assert any(
volume["name"] == "docker-config"
for volume in job["spec"]["template"]["spec"]["volumes"]
)
assert any(
volume_mount["name"] == "docker-config"
for volume_mount in job["spec"]["template"]["spec"]["containers"][0][
"volumeMounts"
]
)
def return_kwargs(**kwargs):
return kwargs
@pytest.fixture
def mock_kubernetes_clients(monkeypatch):
mock_config_map = MagicMock()
mock_config_map.metadata = MagicMock()
mock_config_map.metadata.name = "test-config-map"
monkeypatch.setattr(kubernetes_asyncio.client, "V1ConfigMap", mock_config_map)
mock_batch_client = MagicMock(name="batch-client")
mock_batch_client.read_name_spaced_job_log = AsyncMock(return_value=MagicMock())
mock_batch_client.create_namespaced_job = AsyncMock(return_value=MagicMock())
mock_batch_client.delete_namespaced_job = AsyncMock(return_value=MagicMock())
mock_core_client = MagicMock(name="core-client")
mock_core_client.create_namespaced_config_map = AsyncMock(return_value=None)
mock_core_client.delete_namespaced_config_map = AsyncMock(return_value=None)
mock_job = MagicMock(name="mock_job")
mock_job_status = MagicMock()
mock_job.status = mock_job_status
# test success is true
mock_job_status.succeeded = 1
mock_batch_client.read_namespaced_job_status = AsyncMock(return_value=mock_job)
monkeypatch.setattr(
kubernetes_asyncio.client,
"BatchV1Api",
MagicMock(return_value=mock_batch_client),
)
monkeypatch.setattr(
kubernetes_asyncio.client, "CoreV1Api", MagicMock(return_value=mock_core_client)
)
monkeypatch.setattr(kubernetes_asyncio.client, "V1PodSpec", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1Volume", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1JobSpec", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1Job", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1PodTemplateSpec", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1Container", return_kwargs)
monkeypatch.setattr(kubernetes_asyncio.client, "V1VolumeMount", return_kwargs)
monkeypatch.setattr(
kubernetes_asyncio.client, "V1SecretVolumeSource", return_kwargs
)
monkeypatch.setattr(
kubernetes_asyncio.client, "V1ConfigMapVolumeSource", return_kwargs
)
monkeypatch.setattr(kubernetes_asyncio.client, "V1ObjectMeta", return_kwargs)
monkeypatch.setattr(
kubernetes_asyncio.config, "load_incluster_config", return_kwargs
)
yield mock_core_client, mock_batch_client
@pytest.fixture
def mock_v1_object_meta(monkeypatch):
monkeypatch.setattr(kubernetes_asyncio.client, "V1ObjectMeta", return_kwargs)
yield return_kwargs
@pytest.fixture
def mock_v1_config_map(monkeypatch):
monkeypatch.setattr(kubernetes_asyncio.client, "V1ConfigMap", return_kwargs)
yield return_kwargs
@pytest.fixture
def mock_boto3(monkeypatch):
monkeypatch.setattr(boto3, "client", MagicMock())
@pytest.fixture
def mock_storage_client(monkeypatch):
monkeypatch.setattr(storage, "Client", MagicMock())
@pytest.mark.asyncio
async def test_wait_for_completion():
mock_api_client = MagicMock()
mock_job = MagicMock()
mock_job_status = MagicMock()
mock_job.status = mock_job_status
# test success is true
mock_job_status.succeeded = 1
mock_api_client.read_namespaced_job_status = AsyncMock(return_value=mock_job)
assert await _wait_for_completion(mock_api_client, "test", 60)
# test failed is false
mock_job_status.succeeded = None
mock_job_status.failed = 1
assert await _wait_for_completion(mock_api_client, "test", 60) is False
# test timeout is false
mock_job_status.failed = None
assert await _wait_for_completion(mock_api_client, "test", 5) is False
@pytest.mark.asyncio
async def test_create_kaniko_job_static(
mock_kubernetes_clients, elastic_container_registry, runner
):
with runner.isolated_filesystem():
os.makedirs("./test/context/path/", exist_ok=True)
with open("./test/context/path/Dockerfile.wandb", "wb") as f:
f.write(b"docker file test contents")
builder = KanikoBuilder(
MagicMock(),
elastic_container_registry,
build_context_store="s3://test-bucket/test-prefix",
secret_name="test-secret",
secret_key="test-key",
config={
"spec": {
"template": {
"spec": {
"containers": [
{
"args": ["--test-arg=test-value"],
"volumeMounts": [
{
"name": "test-volume",
"mountPath": "/test/path/",
}
],
}
],
"volumes": [{"name": "test-volume"}],
}
}
}
},
)
job_name = "test_job_name"
repo_url = "repository-url"
image_tag = "image_tag:12345678"
context_path = "./test/context/path/"
job = await builder._create_kaniko_job(
job_name,
repo_url,
image_tag,
context_path,
kubernetes_asyncio.client.CoreV1Api(),
MagicMock(),
)
assert job["metadata"]["name"] == "test_job_name"
assert job["metadata"]["namespace"] == "wandb"
assert job["metadata"]["labels"] == {"wandb": "launch"}
assert (
job["spec"]["template"]["spec"]["containers"][0]["image"]
== "gcr.io/kaniko-project/executor:v1.11.0"
)
assert job["spec"]["template"]["spec"]["containers"][0]["args"] == [
f"--context={context_path}",
"--dockerfile=Dockerfile.wandb",
f"--destination={image_tag}",
"--cache=true",
f"--cache-repo={repo_url}",
"--snapshot-mode=redo",
"--compressed-caching=false",
"--test-arg=test-value",
]
assert job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"] == [
{
"name": "test-volume",
"mountPath": "/test/path/",
},
{
"name": "docker-config",
"mountPath": "/kaniko/.docker",
},
{
"name": "test-secret",
"mountPath": "/root/.aws",
"readOnly": True,
},
]
assert job["spec"]["template"]["spec"]["volumes"][0] == {"name": "test-volume"}
assert job["spec"]["template"]["spec"]["volumes"][1] == {
"name": "docker-config",
"configMap": {"name": "docker-config-test_job_name"},
}
assert job["spec"]["template"]["spec"]["volumes"][2]["name"] == "test-secret"
assert (
job["spec"]["template"]["spec"]["volumes"][2]["secret"]["secretName"]
== "test-secret"
)
assert (
job["spec"]["template"]["spec"]["volumes"][2]["secret"]["items"][0]["key"]
== "test-key"
)
assert (
job["spec"]["template"]["spec"]["volumes"][2]["secret"]["items"][0]["path"]
== "credentials"
)
assert (
"mode"
not in job["spec"]["template"]["spec"]["volumes"][2]["secret"]["items"][0]
)
@pytest.mark.asyncio
async def test_create_kaniko_job_instance(
elastic_container_registry, mock_kubernetes_clients, runner
):
with runner.isolated_filesystem():
os.makedirs("./test/context/path/", exist_ok=True)
with open("./test/context/path/Dockerfile.wandb", "wb") as f:
f.write(b"docker file test contents")
builder = KanikoBuilder(
MagicMock(),
elastic_container_registry,
build_context_store="s3://test-bucket/test-prefix",
)
job_name = "test_job_name"
repo_url = "12345678.dkr.ecr.us-east-1.amazonaws.com/test-repo"
image_tag = "image_tag:12345678"
context_path = "./test/context/path/"
job = await builder._create_kaniko_job(
job_name, repo_url, image_tag, context_path, MagicMock(), MagicMock()
)
assert job["metadata"]["name"] == "test_job_name"
assert job["metadata"]["namespace"] == "wandb"
assert job["metadata"]["labels"] == {"wandb": "launch"}
assert (
job["spec"]["template"]["spec"]["containers"][0]["image"]
== "gcr.io/kaniko-project/executor:v1.11.0"
)
assert job["spec"]["template"]["spec"]["containers"][0]["args"] == [
f"--context={context_path}",
"--dockerfile=Dockerfile.wandb",
f"--destination={image_tag}",
"--cache=true",
f"--cache-repo={repo_url}",
"--snapshot-mode=redo",
"--compressed-caching=false",
]
assert job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"] == []
assert job["spec"]["template"]["spec"]["volumes"] == []
@pytest.mark.asyncio
async def test_create_kaniko_job_pvc_dockerconfig(
mock_kubernetes_clients, runner, mocker
):
"""Test that the kaniko builder mounts pvc and dockerconfig correctly."""
mocker.patch("wandb.sdk.launch.builder.kaniko_builder.PVC_NAME", "test-pvc")
mocker.patch(
"wandb.sdk.launch.builder.kaniko_builder.PVC_MOUNT_PATH", "/mnt/test-pvc"
)
mocker.patch(
"wandb.sdk.launch.builder.kaniko_builder.DOCKER_CONFIG_SECRET", "test-secret"
)
with runner.isolated_filesystem():
os.makedirs("./test/context/path/", exist_ok=True)
with open("./test/context/path/Dockerfile.wandb", "wb") as f:
f.write(b"docker file test contents")
job_name = "test_job_name"
repo_url = "myspace.com/test-repo"
image_tag = "12345678"
context_path = "./test/context/path/"
builder = KanikoBuilder(
MagicMock(),
AnonynmousRegistry(repo_url),
)
job = await builder._create_kaniko_job(
job_name, repo_url, image_tag, context_path, MagicMock(), MagicMock()
)
assert job["metadata"]["name"] == "test_job_name"
assert job["metadata"]["namespace"] == "wandb"
assert job["metadata"]["labels"] == {"wandb": "launch"}
assert (
job["spec"]["template"]["spec"]["containers"][0]["image"]
== "gcr.io/kaniko-project/executor:v1.11.0"
)
assert job["spec"]["template"]["spec"]["containers"][0]["args"] == [
f"--context={context_path}",
"--dockerfile=Dockerfile.wandb",
f"--destination={image_tag}",
"--cache=true",
f"--cache-repo={repo_url}",
"--snapshot-mode=redo",
"--compressed-caching=false",
]
assert job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"] == [
{
"name": "kaniko-pvc",
"mountPath": "/context",
},
{
"name": "kaniko-docker-config",
"mountPath": "/kaniko/.docker",
},
]
pvc_volume = job["spec"]["template"]["spec"]["volumes"][0]
dockerconfig_volume = job["spec"]["template"]["spec"]["volumes"][1]
assert pvc_volume["name"] == "kaniko-pvc"
assert pvc_volume["persistentVolumeClaim"]["claimName"] == "test-pvc"
assert "readOnly" not in pvc_volume["persistentVolumeClaim"]
assert dockerconfig_volume["name"] == "kaniko-docker-config"
assert dockerconfig_volume["secret"]["secretName"] == "test-secret"
assert dockerconfig_volume["secret"]["items"][0]["key"] == ".dockerconfigjson"
assert dockerconfig_volume["secret"]["items"][0]["path"] == "config.json"
@pytest.mark.asyncio
async def test_build_image_success(
monkeypatch,
mock_kubernetes_clients,
aws_environment,
elastic_container_registry,
runner,
mock_boto3,
test_settings,
capsys,
tmp_path,
):
api = wandb.sdk.internal.internal_api.Api(
default_settings=test_settings(), load_settings=False
)
monkeypatch.setattr(
wandb.sdk.launch._project_spec.LaunchProject, "build_required", lambda x: True
)
with runner.isolated_filesystem():
os.makedirs("./test/context/path/", exist_ok=True)
with open("./test/context/path/Dockerfile.wandb", "wb") as f:
f.write(b"docker file test contents")
mock_job = MagicMock(name="mock_job")
mock_job.status.succeeded = 1
builder = KanikoBuilder(
aws_environment,
elastic_container_registry,
build_context_store="s3://test-bucket/test-prefix",
)
job_name = "mock_server_entity/test/job-artifact"
job_version = 0
kwargs = {
"uri": None,
"job": f"{job_name}:v{job_version}",
"api": api,
"launch_spec": {},
"target_entity": "mock_server_entity",
"target_project": "test",
"name": None,
"docker_config": {},
"git_info": {},
"overrides": {"entry_point": ["python", "main.py"]},
"resource": "kubernetes",
"resource_args": {},
"run_id": None,
}
project = LaunchProject(**kwargs)
mock_artifact = MagicMock()
mock_artifact.name = job_name
mock_artifact.version = job_version
project._job_artifact = mock_artifact
entry_point = EntryPoint("main.py", ["python", "main.py"])
project.set_job_entry_point(entry_point.command)
image_uri = await builder.build_image(project, entry_point)
assert (
"Created kaniko job wandb-launch-container-build-"
in capsys.readouterr().err
)
assert "12345678.dkr.ecr.us-east-1.amazonaws.com/test-repo" in image_uri
def test_kaniko_builder_from_config(aws_environment, elastic_container_registry):
"""Test that the kaniko builder correctly constructs the job spec for Azure."""
config = {
"type": "kaniko",
"build-context-store": "s3://test-bucket/test-prefix",
"destination": "12345678.dkr.ecr.us-east-1.amazonaws.com/test-repo",
}
builder = KanikoBuilder.from_config(
config, aws_environment, elastic_container_registry
)
assert builder.build_context_store == "s3://test-bucket/test-prefix"
def test_get_pod_name():
job = kubernetes_asyncio.client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=kubernetes_asyncio.client.V1ObjectMeta(name="test-job"),
spec=kubernetes_asyncio.client.V1JobSpec(
template=kubernetes_asyncio.client.V1PodTemplateSpec(
metadata=kubernetes_asyncio.client.V1ObjectMeta(name="test-pod-name"),
)
),
)
assert get_pod_name_safe(job) == "test-pod-name"
job = kubernetes_asyncio.client.V1Job(
api_version="batch/v1",
kind="Job",
)
assert get_pod_name_safe(job) is None
| AsyncMock |
python | kamyu104__LeetCode-Solutions | Python/maximum-total-area-occupied-by-pistons.py | {
"start": 79,
"end": 933
} | class ____(object):
def maxArea(self, height, positions, directions):
"""
:type height: int
:type positions: List[int]
:type directions: str
:rtype: int
"""
diff = [0]*(2*height+1)
for d, i in itertools.izip(directions, positions):
if d == 'U':
diff[height-i] -= 1
diff[(height-i)+height] += 1
else:
diff[i] += 1
diff[i+height] -= 1
result = total = sum(positions)
cnt = directions.count('U')
for t in xrange(1, len(diff)):
total += -(len(directions)-cnt)+cnt
result = max(result, total)
cnt += diff[t]
return result
# Time: O(nlogn)
# Space: O(n)
import collections
import itertools
# sort, line sweep, difference array
| Solution |
python | getsentry__sentry | src/sentry/profiles/flamegraph.py | {
"start": 1376,
"end": 1578
} | class ____(TypedDict):
transaction: list[TransactionProfileCandidate]
continuous: list[ContinuousProfileCandidate]
generate_metrics: NotRequired[bool]
@dataclass(frozen=True)
| ProfileCandidates |
python | huggingface__transformers | src/transformers/models/rag/retrieval_rag.py | {
"start": 10498,
"end": 13605
} | class ____(HFIndexBase):
"""
A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed
index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_name (`str`, optional, defaults to `wiki_dpr`):
A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
with `datasets.list_datasets()`).
dataset_split (`str`, optional, defaults to `train`)
Which split of the `dataset` to load.
index_name (`str`, optional, defaults to `train`)
The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
under this name.
index_path (`str`, optional, defaults to `None`)
The path to the serialized faiss index on disk.
use_dummy_dataset (`bool`, optional, defaults to `False`):
If True, use the dummy configuration of the dataset for tests.
"""
def __init__(
self,
vector_size: int,
dataset_name: str = "wiki_dpr",
dataset_split: str = "train",
index_name: Optional[str] = None,
index_path: Optional[str] = None,
use_dummy_dataset=False,
dataset_revision=None,
):
requires_backends(self, ["faiss"])
if int(index_path is None) + int(index_name is None) != 1:
raise ValueError("Please provide `index_name` or `index_path`.")
self.dataset_name = dataset_name
self.dataset_split = dataset_split
self.index_name = index_name
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.dataset_revision = dataset_revision
logger.info(f"Loading passages from {self.dataset_name}")
dataset = load_dataset(
self.dataset_name,
with_index=False,
split=self.dataset_split,
dummy=self.use_dummy_dataset,
revision=dataset_revision,
)
super().__init__(vector_size, dataset, index_initialized=False)
def init_index(self):
if self.index_path is not None:
logger.info(f"Loading index from {self.index_path}")
self.dataset.load_faiss_index("embeddings", file=self.index_path)
else:
logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
self.dataset = load_dataset(
self.dataset_name,
with_embeddings=True,
with_index=True,
split=self.dataset_split,
index_name=self.index_name,
dummy=self.use_dummy_dataset,
revision=self.dataset_revision,
)
self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
self._index_initialized = True
| CanonicalHFIndex |
python | getsentry__sentry | src/sentry/migrations/0957_projecttemplateoption_json.py | {
"start": 244,
"end": 1774
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0956_add_group_by_to_snuba_query"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[mod.to_jsonb("sentry_projecttemplateoption", "value")],
state_operations=[
migrations.AlterField(
model_name="projecttemplateoption",
name="value",
field=models.JSONField(null=True),
),
],
)
]
| Migration |
python | doocs__leetcode | solution/3400-3499/3423.Maximum Difference Between Adjacent Elements in a Circular Array/Solution.py | {
"start": 0,
"end": 145
} | class ____:
def maxAdjacentDistance(self, nums: List[int]) -> int:
return max(abs(a - b) for a, b in pairwise(nums + [nums[0]]))
| Solution |
python | plotly__plotly.py | _plotly_utils/exceptions.py | {
"start": 93,
"end": 1054
} | class ____(PlotlyError):
def __init__(self, message="", path=(), notes=()):
"""
General graph object error for validation failures.
:param (str|unicode) message: The error message.
:param (iterable) path: A path pointing to the error.
:param notes: Add additional notes, but keep default exception message.
"""
self.message = message
self.plain_message = message # for backwards compat
self.path = list(path)
self.notes = notes
super(PlotlyGraphObjectError, self).__init__(message)
def __str__(self):
"""This is called by Python to present the error message."""
format_dict = {
"message": self.message,
"path": "[" + "][".join(repr(k) for k in self.path) + "]",
"notes": "\n".join(self.notes),
}
return "{message}\n\nPath To Error: {path}\n\n{notes}".format(**format_dict)
| PlotlyGraphObjectError |
python | google__jax | jax/_src/basearray.py | {
"start": 1204,
"end": 6979
} | class ____:
"""Array base class for JAX
``jax.Array`` is the public interface for instance checks and type annotation
of JAX arrays and tracers. Its main applications are in instance checks and
type annotations; for example::
x = jnp.arange(5)
isinstance(x, jax.Array) # returns True both inside and outside traced functions.
def f(x: Array) -> Array: # type annotations are valid for traced and non-traced types.
return x
``jax.Array`` should not be used directly for creation of arrays; instead you
should use array creation routines offered in :mod:`jax.numpy`, such as
:func:`jax.numpy.array`, :func:`jax.numpy.zeros`, :func:`jax.numpy.ones`,
:func:`jax.numpy.full`, :func:`jax.numpy.arange`, etc.
"""
# For the sake of static type analysis, these definitions are mirrored in the
# associated basearray.pyi file.
__slots__ = ['__weakref__']
__hash__ = None
@property
def dtype(self) -> np.dtype:
"""The data type (:class:`numpy.dtype`) of the array."""
raise NotImplementedError
@property
def ndim(self) -> int:
"""The number of dimensions in the array."""
raise NotImplementedError
@property
def size(self) -> int:
"""The total number of elements in the array."""
raise NotImplementedError
@property
def shape(self) -> tuple[int, ...]:
"""The shape of the array."""
raise NotImplementedError
# Documentation for sharding-related methods and properties defined on ArrayImpl:
def addressable_data(self, index: int) -> Array:
"""Return an array of the addressable data at a particular index."""
raise NotImplementedError
@property
def addressable_shards(self) -> Sequence[Shard]:
"""List of addressable shards."""
raise NotImplementedError
@property
def global_shards(self) -> Sequence[Shard]:
"""List of global shards."""
raise NotImplementedError
@property
def is_fully_addressable(self) -> bool:
"""Is this Array fully addressable?
A jax.Array is fully addressable if the current process can address all of
the devices named in the :class:`Sharding`. ``is_fully_addressable`` is
equivalent to "is_local" in multi-process JAX.
Note that fully replicated is not equal to fully addressable i.e.
a jax.Array which is fully replicated can span across multiple hosts and is
not fully addressable.
"""
raise NotImplementedError
@property
def is_fully_replicated(self) -> bool:
"""Is this Array fully replicated?"""
raise NotImplementedError
@property
def sharding(self) -> Sharding:
"""The sharding for the array."""
raise NotImplementedError
@property
def committed(self) -> bool:
"""Whether the array is committed or not.
An array is committed when it is explicitly placed on device(s) via JAX
APIs. For example, `jax.device_put(np.arange(8), jax.devices()[0])` is
committed to device 0. While `jax.device_put(np.arange(8))` is uncommitted
and will be placed on the default device.
Computations involving some committed inputs will happen on the committed
device(s) and the result will be committed on the same device(s).
Invoking an operation on arguments that are committed to different device(s)
will raise an error.
For example:
```
a = jax.device_put(np.arange(8), jax.devices()[0])
b = jax.device_put(np.arange(8), jax.devices()[1])
a + b # Raises an error
```
See https://docs.jax.dev/en/latest/faq.html#controlling-data-and-computation-placement-on-devices
for more information.
"""
raise NotImplementedError
@property
def device(self) -> Device | Sharding:
"""Array API-compatible device attribute.
For single-device arrays, this returns a Device. For sharded arrays, this
returns a Sharding.
"""
raise NotImplementedError
def copy_to_host_async(self):
"""Copies an ``Array`` to the host asynchronously.
For arrays that live an an accelerator, such as a GPU or a TPU, JAX may
cache the value of the array on the host. Normally this happens
behind the scenes when the value of an on-device array is requested by the
user, but waiting to initiate a device-to-host copy until the value is
requested requires that JAX block the caller while waiting for the copy to
complete.
``copy_to_host_async`` requests that JAX populate its on-host cache of an
array, but does not wait for the copy to complete. This may speed up a
future on-host access to the array's contents.
"""
raise NotImplementedError
Array = use_cpp_class(xc.Array)(Array)
Array.__module__ = "jax"
# StaticScalar is the Union of all scalar types that can be converted to
# JAX arrays, and are possible to mark as static arguments.
StaticScalar = Union[
np.bool_, np.number, # NumPy scalar types
bool, int, float, complex, # Python scalar types
]
if sys.version_info[:2] < (3, 14):
# Python 3.14 raises
# AttributeError: 'typing.Union' object attribute '__doc__' is read-only
StaticScalar.__doc__ = "Type annotation for JAX-compatible static scalars."
# ArrayLike is a Union of all objects that can be implicitly converted to a
# standard JAX array (i.e. not including future non-standard array types like
# KeyArray and BInt). It's different than np.typing.ArrayLike in that it doesn't
# accept arbitrary sequences, nor does it accept string data.
ArrayLike = Union[
Array, # JAX array type
np.ndarray, # NumPy array type
StaticScalar, # valid scalars
literals.TypedNdArray, # Typed array type
]
if sys.version_info[:2] < (3, 14):
# Python 3.14 raises
# AttributeError: 'typing.Union' object attribute '__doc__' is read-only
ArrayLike.__doc__ = "Type annotation for JAX array-like objects."
| Array |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_date_range.py | {
"start": 1897,
"end": 3900
} | class ____:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
pytz = pytest.importorskip("pytz")
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
assert ts == stamp
@td.skip_if_windows
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"))
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance)
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05")
assert timestamp_instance == ts
| TestTimestampEquivDateRange |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_organization_release_health_data.py | {
"start": 56849,
"end": 100007
} | class ____(MetricsAPIBaseTestCase):
endpoint = "sentry-api-0-organization-metrics-data"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.login_as(user=self.create_user(is_staff=True), staff=True)
org_id = self.organization.id
self.session_duration_metric = rh_indexer_record(org_id, SessionMRI.RAW_DURATION.value)
self.session_metric = rh_indexer_record(org_id, SessionMRI.RAW_SESSION.value)
self.session_user_metric = rh_indexer_record(org_id, SessionMRI.RAW_USER.value)
self.session_error_metric = rh_indexer_record(org_id, SessionMRI.RAW_ERROR.value)
self.session_status_tag = rh_indexer_record(org_id, "session.status")
self.release_tag = rh_indexer_record(self.organization.id, "release")
self.tx_metric = perf_indexer_record(org_id, TransactionMRI.DURATION.value)
self.tx_status = perf_indexer_record(org_id, TransactionTagsKey.TRANSACTION_STATUS.value)
self.transaction_lcp_metric = perf_indexer_record(
self.organization.id, TransactionMRI.MEASUREMENTS_LCP.value
)
self.tx_satisfaction = perf_indexer_record(
self.organization.id, TransactionTagsKey.TRANSACTION_SATISFACTION.value
)
self.tx_user_metric = perf_indexer_record(self.organization.id, TransactionMRI.USER.value)
@property
def now(self):
return MetricsAPIBaseTestCase.MOCK_DATETIME
@patch("sentry.snuba.metrics.fields.base.DERIVED_METRICS", MOCKED_DERIVED_METRICS)
@patch("sentry.snuba.metrics.query.parse_mri")
@patch("sentry.snuba.metrics.fields.base.get_public_name_from_mri")
@patch("sentry.snuba.metrics.query_builder.get_mri")
@patch("sentry.snuba.metrics.query.get_public_name_from_mri")
@patch("sentry.releases.endpoints.organization_release_health_data.parse_field")
def test_derived_metric_incorrectly_defined_as_singular_entity(
self,
mocked_parse_field,
mocked_get_public_name_from_mri,
mocked_get_mri_query,
mocked_reverse_mri,
mocked_parse_mri,
):
mocked_get_public_name_from_mri.return_value = "crash_free_fake"
mocked_get_mri_query.return_value = "crash_free_fake"
mocked_reverse_mri.return_value = "crash_free_fake"
mocked_parse_mri.return_value = ParsedMRI("e", "sessions", "crash_free_fake", "none")
mocked_parse_field.return_value = MetricField(None, "e:sessions/crashed_free_fake@none")
for status in ["ok", "crashed"]:
for minute in range(4):
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=minute,
status=status,
)
response = self.get_error_response(
self.organization.slug,
field=["crash_free_fake"],
statsPeriod="6m",
interval="1m",
useCase="sessions",
status_code=400,
)
assert response.data["detail"] == (
"Derived Metric crash_free_fake cannot be calculated from a single entity"
)
def test_derived_metric_does_not_exist(self) -> None:
"""
Test that ensures appropriate exception is raised when a request is made for a field with no
operation and a field that is not a valid derived metric
"""
response = self.get_error_response(
self.organization.slug,
project=[self.project.id],
field=["crash_free_fake"],
statsPeriod="6m",
interval="1m",
status_code=400,
)
assert response.data["detail"] == (
"Failed to parse 'crash_free_fake'. The metric name must belong to a public metric."
)
def test_staff_crash_free_percentage(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
for status in ["ok", "crashed"]:
for minute in range(4):
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=minute,
status=status,
)
response = self.get_success_response(
self.organization.slug,
field=["session.crash_free_rate", "session.all", "session.crashed"],
statsPeriod="6m",
interval="1m",
)
group = response.data["groups"][0]
assert group["totals"]["session.crash_free_rate"] == 0.5
assert group["totals"]["session.all"] == 8
assert group["totals"]["session.crashed"] == 4
assert group["series"]["session.crash_free_rate"] == [None, None, 0.5, 0.5, 0.5, 0.5]
def test_crash_free_percentage(self) -> None:
for status in ["ok", "crashed"]:
for minute in range(4):
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=minute,
status=status,
)
response = self.get_success_response(
self.organization.slug,
field=["session.crash_free_rate", "session.all", "session.crashed"],
statsPeriod="6m",
interval="1m",
)
group = response.data["groups"][0]
assert group["totals"]["session.crash_free_rate"] == 0.5
assert group["totals"]["session.all"] == 8
assert group["totals"]["session.crashed"] == 4
assert group["series"]["session.crash_free_rate"] == [None, None, 0.5, 0.5, 0.5, 0.5]
def test_crash_free_percentage_with_orderby(self) -> None:
for status in ["ok", "crashed"]:
for minute in range(4):
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=minute,
status=status,
release="foobar@1.0",
)
for minute in range(4):
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=minute,
status="ok",
release="foobar@2.0",
)
response = self.get_success_response(
self.organization.slug,
field=["session.crash_free_rate"],
statsPeriod="6m",
interval="1m",
groupBy="release",
orderBy="-session.crash_free_rate",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.crash_free_rate"] == 1
assert group["series"]["session.crash_free_rate"] == [None, None, 1, 1, 1, 1]
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.crash_free_rate"] == 0.5
assert group["series"]["session.crash_free_rate"] == [None, None, 0.5, 0.5, 0.5, 0.5]
def test_crash_free_rate_when_no_session_metrics_data_exist(self) -> None:
response = self.get_success_response(
self.organization.slug,
project=[self.project.id],
field=["session.crash_free_rate", "sum(sentry.sessions.session)"],
statsPeriod="6m",
interval="6m",
orderBy="-session.crash_free_rate",
)
group = response.data["groups"][0]
assert group["totals"]["session.crash_free_rate"] is None
assert group["totals"]["sum(sentry.sessions.session)"] == 0
assert group["series"]["sum(sentry.sessions.session)"] == [0]
assert group["series"]["session.crash_free_rate"] == [None]
def test_crash_free_rate_when_no_session_metrics_data_with_orderby_and_groupby(self) -> None:
response = self.get_success_response(
self.organization.slug,
project=[self.project.id],
field=[
SessionMetricKey.CRASH_FREE_RATE.value,
"sum(sentry.sessions.session)",
],
statsPeriod="6m",
interval="6m",
groupBy=["release"],
orderBy="-session.crash_free_rate",
)
assert response.data["groups"] == []
def test_incorrect_crash_free_rate(self) -> None:
response = self.get_error_response(
self.organization.slug,
project=[self.project.id],
field=[f"sum({SessionMetricKey.CRASH_FREE_RATE.value})"],
statsPeriod="6m",
interval="1m",
status_code=400,
)
assert (response.data["detail"]) == (
"Failed to parse sum(session.crash_free_rate). No operations can be applied on this "
"field as it is already a derived metric with an aggregation applied to it."
)
    def test_errored_sessions(self) -> None:
        """Exercises the composite ``session.errored`` derived metric.

        Feeds pre-aggregated session counters plus individual raw errors;
        the expected total of 7 is consistent with
        (errored_preaggr - crashed - abnormal) + distinct raw errors =
        (10 - 2 - 4) + 3 -- TODO confirm the server-side formula.
        """
        for tag_value, value in (
            ("errored_preaggr", 10),
            ("crashed", 2),
            ("abnormal", 4),
            ("init", 15),
        ):
            self.store_release_health_metric(
                name=SessionMRI.RAW_SESSION.value,
                tags={"session.status": tag_value},
                value=value,
                minutes_before_now=4,
            )
        # Three distinct individual errors in the most recent bucket.
        for value in range(3):
            self.store_release_health_metric(
                name=SessionMRI.RAW_ERROR.value,
                tags={"release": "foo"},
                value=value,
            )
        response = self.get_success_response(
            self.organization.slug,
            field=[SessionMetricKey.ERRORED.value],
            statsPeriod="6m",
            interval="1m",
        )
        group = response.data["groups"][0]
        assert group["totals"]["session.errored"] == 7
        assert group["series"]["session.errored"] == [0, 4, 0, 0, 0, 3]
def test_orderby_composite_entity_derived_metric(self) -> None:
self.build_and_store_session(
project_id=self.project.id,
status="ok",
release="foobar@2.0",
errors=2,
)
response = self.get_error_response(
self.organization.slug,
field=["session.errored"],
statsPeriod="6m",
interval="1m",
groupBy=["release"],
orderBy=["session.errored"],
status_code=400,
)
assert response.data["detail"] == (
"Selected 'orderBy' columns must belongs to the same entity"
)
def test_abnormal_sessions(self) -> None:
for tag_value, value, minutes in (
("foo", 4, 4),
("bar", 3, 2),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": "abnormal", "release": tag_value},
value=value,
minutes_before_now=minutes,
)
response = self.get_success_response(
self.organization.slug,
field=["session.abnormal"],
statsPeriod="6m",
interval="1m",
groupBy=["release"],
orderBy=["-session.abnormal"],
)
foo_group, bar_group = response.data["groups"][0], response.data["groups"][1]
assert foo_group["by"]["release"] == "foo"
assert foo_group["totals"] == {"session.abnormal": 4}
assert foo_group["series"] == {"session.abnormal": [0, 4, 0, 0, 0, 0]}
assert bar_group["by"]["release"] == "bar"
assert bar_group["totals"] == {"session.abnormal": 3}
assert bar_group["series"] == {"session.abnormal": [0, 0, 0, 3, 0, 0]}
def test_crashed_user_sessions(self) -> None:
for tag_value, values in (
("foo", [1, 2, 4]),
("bar", [1, 2, 4, 8, 9, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags={"session.status": "crashed", "release": tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.crashed_user"],
statsPeriod="6m",
interval="1m",
groupBy=["release"],
orderBy=["-session.crashed_user"],
)
foo_group, bar_group = response.data["groups"][1], response.data["groups"][0]
assert foo_group["by"]["release"] == "foo"
assert foo_group["totals"] == {"session.crashed_user": 3}
assert foo_group["series"] == {"session.crashed_user": [0, 0, 0, 0, 0, 3]}
assert bar_group["by"]["release"] == "bar"
assert bar_group["totals"] == {"session.crashed_user": 6}
assert bar_group["series"] == {"session.crashed_user": [0, 0, 0, 0, 0, 6]}
def test_all_user_sessions(self) -> None:
for value in [1, 2, 4]:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags={},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.all_user"],
statsPeriod="6m",
interval="1m",
)
group = response.data["groups"][0]
assert group["totals"] == {"session.all_user": 3}
assert group["series"] == {"session.all_user": [0, 0, 0, 0, 0, 3]}
def test_abnormal_user_sessions(self) -> None:
cases: tuple[tuple[dict[str, str], list[int]], ...] = (
({"session.status": "abnormal"}, [1, 2, 4]),
({}, [1, 2, 4, 7, 9]),
)
for tags, values in cases:
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.abnormal_user"],
statsPeriod="6m",
interval="1m",
)
group = response.data["groups"][0]
assert group["totals"] == {"session.abnormal_user": 3}
assert group["series"] == {"session.abnormal_user": [0, 0, 0, 0, 0, 3]}
def test_crash_free_user_percentage_with_orderby(self) -> None:
for tags, values in (
({"release": "foobar@1.0"}, [1, 2, 4, 8]),
({"session.status": "crashed", "release": "foobar@1.0"}, [1, 2]),
({"release": "foobar@2.0"}, [3, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.crash_free_user_rate"],
statsPeriod="6m",
interval="6m",
groupBy="release",
orderBy="-session.crash_free_user_rate",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.crash_free_user_rate"] == 1
assert group["series"]["session.crash_free_user_rate"] == [1]
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.crash_free_user_rate"] == 0.5
assert group["series"]["session.crash_free_user_rate"] == [0.5]
def test_crash_free_user_rate_orderby_crash_free_rate(self) -> None:
# Users crash free rate
# foobar@1.0 -> 0.5
# foobar@2.0 -> 1
for tags, values in (
({"release": "foobar@1.0"}, [1, 2, 4, 8]),
({"session.status": "crashed", "release": "foobar@1.0"}, [1, 2]),
({"release": "foobar@2.0"}, [3, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
# Crash free rate
# foobar@1.0 -> 0.75
# foobar@2.0 -> 0.25
for tag_value, release_tag_value, value, second in (
("init", "foobar@1.0", 4, 4),
("crashed", "foobar@1.0", 1, 2),
("init", "foobar@2.0", 4, 4),
("crashed", "foobar@2.0", 3, 2),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": release_tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=[
"session.crash_free_user_rate",
"session.crash_free_rate",
"session.crash_user_rate",
"session.crash_rate",
],
statsPeriod="1h",
interval="1h",
groupBy="release",
orderBy="-session.crash_free_rate",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.crash_free_rate"] == 0.75
assert group["totals"]["session.crash_free_user_rate"] == 0.5
assert group["totals"]["session.crash_rate"] == 0.25
assert group["totals"]["session.crash_user_rate"] == 0.5
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.crash_free_rate"] == 0.25
assert group["totals"]["session.crash_free_user_rate"] == 1.0
assert group["totals"]["session.crash_rate"] == 0.75
assert group["totals"]["session.crash_user_rate"] == 0.0
def test_healthy_sessions(self) -> None:
for tags, value in (
({"session.status": "errored_preaggr", "release": "foo"}, 4),
({"session.status": "init", "release": "foo"}, 10),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags=tags,
value=value,
)
for value in range(3):
self.store_release_health_metric(
name=SessionMRI.RAW_ERROR.value,
tags={"release": "foo"},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.healthy", "session.errored", "session.all"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.healthy"] == 3
assert group["series"]["session.healthy"] == [3]
def test_healthy_sessions_preaggr(self) -> None:
"""Healthy sessions works also when there are no individual errors"""
for tag_value, value in (
("errored_preaggr", 4),
("init", 10),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": "foo"},
value=value,
)
# Can get session healthy even before all components exist
# (projects that send errored_preaggr usually do not send individual errors)
response = self.get_success_response(
self.organization.slug,
field=["session.healthy"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.healthy"] == 6
assert group["series"]["session.healthy"] == [6]
def test_errored_user_sessions(self) -> None:
# Crashed 3
# Abnormal 6
# Errored all 9
# Errored = 3
for tag_value, values in (
("crashed", [1, 2, 4]),
("errored", [1, 2, 4]),
("abnormal", [99, 3, 6, 8, 9, 5]),
("errored", [99, 3, 6, 8, 9, 5]),
("errored", [22, 33, 44]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags={"session.status": tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.errored_user"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.errored_user"] == 3
assert group["series"]["session.errored_user"] == [3]
def test_errored_user_sessions_clamped_to_zero(self) -> None:
# Crashed 3
# Errored all 0
# Errored = -3
for value in [1, 2, 4]:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags={"session.status": "crashed"},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.errored_user"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.errored_user"] == 0
assert group["series"]["session.errored_user"] == [0]
def test_healthy_user_sessions(self) -> None:
cases: tuple[tuple[dict[str, str], list[int]], ...] = (
({}, [1, 2, 4, 5, 7]), # 3 and 6 did not recorded at init
({"session.status": "ok"}, [3]), # 3 was not in init, but still counts
({"session.status": "errored"}, [1, 2, 6]), # 6 was not in init, but still counts
)
for tags, values in cases:
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["session.healthy_user"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.healthy_user"] == 4
assert group["series"]["session.healthy_user"] == [4]
def test_healthy_user_sessions_clamped_to_zero(self) -> None:
# init = 0
# errored_all = 1
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags={"session.status": "errored"},
value=1,
)
response = self.get_success_response(
self.organization.slug,
field=["session.healthy_user"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group["totals"]["session.healthy_user"] == 0
assert group["series"]["session.healthy_user"] == [0]
def test_private_transactions_derived_metric(self) -> None:
response = self.get_error_response(
self.organization.slug,
project=[self.project.id],
field=["transaction.all"],
statsPeriod="1m",
interval="1m",
status_code=400,
)
assert response.data["detail"] == (
"Failed to parse 'transaction.all'. The metric name must belong to a public metric."
)
def test_failure_rate_transaction(self) -> None:
for value, tag_value in (
(3.4, TransactionStatusTagValue.OK.value),
(0.3, TransactionStatusTagValue.CANCELLED.value),
(2.3, TransactionStatusTagValue.UNKNOWN.value),
(0.5, TransactionStatusTagValue.ABORTED.value),
):
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
tags={TransactionTagsKey.TRANSACTION_STATUS.value: tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["transaction.failure_rate"],
statsPeriod="1m",
interval="1m",
useCase="transactions",
)
assert len(response.data["groups"]) == 1
group = response.data["groups"][0]
assert group["by"] == {}
assert group["totals"] == {"transaction.failure_rate": 0.25}
assert group["series"] == {"transaction.failure_rate": [0.25]}
    def test_failure_rate_without_transactions(self) -> None:
        """
        Ensures the absence of transactions isn't an issue to calculate the rate.
        The `nan` a division by 0 may produce must not be in the response, yet
        they are an issue in javascript:
        ```
        $ node
        Welcome to Node.js v16.13.1.
        Type ".help" for more information.
        > JSON.parse('NaN')
        Uncaught SyntaxError: Unexpected token N in JSON at position 0
        > JSON.parse('nan')
        Uncaught SyntaxError: Unexpected token a in JSON at position 1
        ```
        """
        # Not sending buckets means no project is created automatically. We need
        # a project without transaction data, so create one:
        self.project  # accessing the lazy fixture is what creates the project
        response = self.get_success_response(
            self.organization.slug,
            field=["transaction.failure_rate"],
            statsPeriod="1m",
            interval="1m",
            useCase="transactions",
        )
        # The rate must be null (None), never NaN.
        assert response.data["groups"] == [
            {
                "by": {},
                "series": {"transaction.failure_rate": [None]},
                "totals": {"transaction.failure_rate": None},
            },
        ]
def test_request_private_derived_metric(self) -> None:
for private_name in [
"session.crashed_and_abnormal_user",
"session.errored_set",
"session.errored_user_all",
]:
response = self.get_error_response(
self.organization.slug,
project=[self.project.id],
field=[private_name],
statsPeriod="6m",
interval="6m",
status_code=400,
)
assert response.data["detail"] == (
f"Failed to parse '{private_name}'. The metric name must belong to a public metric."
)
def test_apdex_transactions(self) -> None:
# See https://docs.sentry.io/product/performance/metrics/#apdex
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.SATISFIED.value
},
value=3.4,
)
for subvalue in [0.3, 2.3]:
self.store_performance_metric(
name=TransactionMRI.DURATION.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.TOLERATED.value
},
value=subvalue,
)
response = self.get_success_response(
self.organization.slug,
field=["transaction.apdex"],
statsPeriod="1m",
interval="1m",
useCase="transactions",
)
assert len(response.data["groups"]) == 1
assert response.data["groups"][0]["totals"] == {"transaction.apdex": 0.6666666666666666}
def test_miserable_users(self) -> None:
for subvalue in [1, 2]:
self.store_performance_metric(
name=TransactionMRI.USER.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.FRUSTRATED.value
},
value=subvalue,
)
for subvalue in [1, 3]:
self.store_performance_metric(
name=TransactionMRI.USER.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.SATISFIED.value
},
value=subvalue,
)
response = self.get_success_response(
self.organization.slug,
field=["transaction.miserable_user"],
statsPeriod="1m",
interval="1m",
useCase="transactions",
)
assert len(response.data["groups"]) == 1
assert response.data["groups"][0]["totals"] == {"transaction.miserable_user": 2}
def test_user_misery(self) -> None:
for subvalue in [3, 4]:
self.store_performance_metric(
name=TransactionMRI.USER.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.FRUSTRATED.value
},
value=subvalue,
)
for subvalue in [5, 6]:
self.store_performance_metric(
name=TransactionMRI.USER.value,
tags={
TransactionTagsKey.TRANSACTION_SATISFACTION.value: TransactionSatisfactionTagValue.SATISFIED.value
},
value=subvalue,
)
response = self.get_success_response(
self.organization.slug,
field=["transaction.user_misery"],
statsPeriod="1m",
interval="1m",
useCase="transactions",
)
assert len(response.data["groups"]) == 1
assert response.data["groups"][0]["totals"] == {
"transaction.user_misery": 0.06478439425051336
}
def test_session_duration_derived_alias(self) -> None:
for tag_value, numbers in (
("exited", [2, 6, 8]),
("crashed", [11, 13, 15]),
):
for value in numbers:
self.store_release_health_metric(
name=SessionMRI.RAW_DURATION.value,
tags={"session.status": tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=["p50(session.duration)"],
statsPeriod="6m",
interval="6m",
)
group = response.data["groups"][0]
assert group == {
"by": {},
"totals": {"p50(session.duration)": 6.0},
"series": {"p50(session.duration)": [6.0]},
}
    def test_do_not_return_negative_crash_free_rate_value_to_the_customer(self) -> None:
        """
        Bug: https://github.com/getsentry/sentry/issues/73172
        Assert that negative value is never returned to the user, even
        in case when there is a problem with the data, instead of negative value we return 0.
        This problem happens during ingestion when 'e:session/crashed' metric is
        ingested, but 'e:session/all' for some reason is not. That would cause
        crash_free_rate value to be negative since it is calculated as:
        crash_free_rate = 1 - (count('e:session/crashed') / count('e:session/all'))
        """
        # ingesting 'e:session/all' and 'e:session/crashed'
        self.build_and_store_session(
            project_id=self.project.id,
            minutes_before_now=1,
            status="crashed",
        )
        # manually ingesting only 'e:session/crashed' metric
        # to make sure that there are more 'e:session/crashed' metrics ingested
        # than 'e:session/all'
        for i in range(2):
            session = self.build_session(
                started=self.adjust_timestamp(
                    self.now
                    - timedelta(
                        minutes=i,
                    )
                ).timestamp()
            )
            # ingesting only 'e:session/crashed'
            self.store_metric(
                self.organization.id,
                self.project.id,
                SessionMRI.RAW_SESSION.value,
                {"session.status": "crashed"},
                int(session["started"]),
                +1,
            )
        response = self.get_success_response(
            self.organization.slug,
            field=["session.crash_free_rate", "session.all", "session.crashed"],
            statsPeriod="6m",
            interval="1m",
        )
        group = response.data["groups"][0]
        # crashed (3) exceeds all (1): the naive formula gives -2, but the
        # customer-facing value is clamped at 0.
        assert group["totals"]["session.all"] == 1.0
        assert group["totals"]["session.crashed"] == 3.0
        assert group["totals"]["session.crash_free_rate"] == 0.0
        for value in group["series"]["session.crash_free_rate"]:
            assert value is None or value >= 0
    def test_do_not_return_crash_rate_value_greater_than_one(self) -> None:
        """
        Assert that value for crash_rate won't be greater than 1.
        This can happen due to possible corruption of data. This problem
        happens during ingestion when 'e:session/crashed' metric is
        ingested, but 'e:session/all' for some reason is not.
        """
        # ingesting 'e:session/all' and 'e:session/crashed'
        self.build_and_store_session(
            project_id=self.project.id,
            minutes_before_now=1,
            status="crashed",
        )
        # manually ingesting only 'e:session/crashed' metric
        # to make sure that there are more 'e:session/crashed' metrics ingested
        # than 'e:session/all'
        for i in range(2):
            session = self.build_session(
                started=self.adjust_timestamp(
                    self.now
                    - timedelta(
                        minutes=i,
                    )
                ).timestamp()
            )
            # ingesting only 'e:session/crashed'
            self.store_metric(
                self.organization.id,
                self.project.id,
                SessionMRI.RAW_SESSION.value,
                {"session.status": "crashed"},
                int(session["started"]),
                +1,
            )
        response = self.get_success_response(
            self.organization.slug,
            field=["session.crash_rate", "session.all", "session.crashed"],
            statsPeriod="6m",
            interval="1m",
        )
        group = response.data["groups"][0]
        # crashed (3) exceeds all (1): the naive ratio would be 3.0.
        assert group["totals"]["session.all"] == 1.0
        assert group["totals"]["session.crashed"] == 3.0
        assert group["totals"]["session.crash_rate"] == 1.0  # value is capped at 1.0
        for value in group["series"]["session.crash_rate"]:
            assert value is None or value <= 1
def test_metric_without_operation_is_not_allowed(self) -> None:
"""
Do not allow to query for a metric without operation.
For example:
- querying for `sentry.sessions.session` is not valid because operation is missing
- querying for `sum(sentry.sessions.session)` is valid because operation `sum` exists
"""
self.build_and_store_session(
project_id=self.project.id,
minutes_before_now=1,
status="crashed",
)
response = self.get_error_response(
self.organization.slug,
field=["sentry.sessions.session"],
includeSeries=0,
statsPeriod="6m",
interval="1m",
status_code=400,
)
result = response.json()
assert "detail" in result
assert result["detail"] == "You can not use generic metric public field without operation"
response = self.get_success_response(
self.organization.slug,
field=["sum(sentry.sessions.session)"],
includeSeries=0,
statsPeriod="6m",
interval="1m",
)
assert response.status_code == 200
def test_abnormal_rate_sessions_and_users(self) -> None:
for tags, values in (
({"release": "foobar@1.0"}, [1, 2, 4, 8]),
({"session.status": "abnormal", "release": "foobar@1.0"}, [1, 2]),
({"release": "foobar@2.0"}, [3, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
for tag_value, release_tag_value, value, second in (
("init", "foobar@1.0", 4, 4),
("abnormal", "foobar@1.0", 1, 2),
("init", "foobar@2.0", 4, 4),
("abnormal", "foobar@2.0", 3, 2),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": release_tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=[
"session.abnormal_rate",
"session.abnormal_user_rate",
],
statsPeriod="1h",
interval="1h",
groupBy="release",
orderBy="-session.abnormal_user_rate",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.abnormal_rate"] == 0.25
assert group["totals"]["session.abnormal_user_rate"] == 0.5
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.abnormal_rate"] == 0.75
assert group["totals"]["session.abnormal_user_rate"] == 0.0
def test_errored_rate_sessions_and_users(self) -> None:
for tags, values in (
({"release": "foobar@1.0"}, [1, 2, 4, 8]),
({"session.status": "errored", "release": "foobar@1.0"}, [1, 2]),
({"release": "foobar@2.0"}, [3, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
for tag_value, release_tag_value, value in (
("init", "foobar@1.0", 4),
("errored_preaggr", "foobar@1.0", 1),
("init", "foobar@2.0", 4),
("errored_preaggr", "foobar@2.0", 3),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": release_tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=[
"session.errored_rate",
"session.errored_user_rate",
"sum(sentry.sessions.session)",
],
statsPeriod="1h",
interval="1h",
groupBy="release",
orderBy="sum(sentry.sessions.session)",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.errored_rate"] == 0.25
assert group["totals"]["session.errored_user_rate"] == 0.5
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.errored_rate"] == 0.75
assert group["totals"]["session.errored_user_rate"] == 0.0
def test_unhandled_rate_sessions_and_users(self) -> None:
for tags, values in (
({"release": "foobar@1.0"}, [1, 2, 4, 8]),
({"session.status": "unhandled", "release": "foobar@1.0"}, [1, 2]),
({"release": "foobar@2.0"}, [3, 5]),
):
for value in values:
self.store_release_health_metric(
name=SessionMRI.RAW_USER.value,
tags=tags,
value=value,
)
for tag_value, release_tag_value, value in (
("init", "foobar@1.0", 4),
("unhandled", "foobar@1.0", 1),
("init", "foobar@2.0", 4),
("unhandled", "foobar@2.0", 3),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": release_tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=[
"session.unhandled_rate",
"session.unhandled_user_rate",
"sum(sentry.sessions.session)",
],
statsPeriod="1h",
interval="1h",
groupBy="release",
orderBy="sum(sentry.sessions.session)",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.unhandled_rate"] == 0.25
assert group["totals"]["session.unhandled_user_rate"] == 0.5
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.unhandled_rate"] == 0.75
assert group["totals"]["session.unhandled_user_rate"] == 0.0
def test_unhealthy_rate_sessions(self) -> None:
for tag_value, release_tag_value, value in (
("init", "foobar@1.0", 4),
("errored_preaggr", "foobar@1.0", 1),
("init", "foobar@2.0", 4),
):
self.store_release_health_metric(
name=SessionMRI.RAW_SESSION.value,
tags={"session.status": tag_value, "release": release_tag_value},
value=value,
)
# Unhealthy rate relies on RAW_ERROR metric
for tag_value, release_tag_value, value in (
("abnormal", "foobar@2.0", 1),
("unhandled", "foobar@2.0", 2),
("crashed", "foobar@2.0", 3),
):
self.store_release_health_metric(
name=SessionMRI.RAW_ERROR.value,
tags={"release": release_tag_value},
value=value,
)
response = self.get_success_response(
self.organization.slug,
field=[
"session.unhealthy_rate",
"sum(sentry.sessions.session)",
],
statsPeriod="1h",
interval="1h",
groupBy="release",
orderBy="-sum(sentry.sessions.session)",
)
group = response.data["groups"][0]
assert group["by"]["release"] == "foobar@1.0"
assert group["totals"]["session.unhealthy_rate"] == 0.25
group = response.data["groups"][1]
assert group["by"]["release"] == "foobar@2.0"
assert group["totals"]["session.unhealthy_rate"] == 0.75
| DerivedMetricsDataTest |
python | tensorflow__tensorflow | tensorflow/python/ops/collective_ops_test.py | {
"start": 1652,
"end": 24329
} | class ____(test.TestCase):
def setUp(self):
context._reset_context() # pylint: disable=protected-access
super(CollectiveOpTest, self).setUp()
  def _testCollectiveReduce(self,
                            inputs,
                            expected,
                            set_graph_key,
                            communication_hint='auto',
                            fp16=False,
                            instance_key=1,
                            merge_op='Add',
                            final_op='Div',
                            timeout=0,
                            reported_group_size=None):
    """Run a CPU all-reduce with one device per input and check the results.

    Args:
      inputs: list of per-device input vectors; its length is the real group
        size and determines the number of CPU devices created.
      expected: vector every device should produce after merge/final ops
        (the defaults, Add then Div, compute the element-wise mean).
      set_graph_key: whether to pin ``collective_graph_key`` in RunOptions.
      communication_hint: implementation hint forwarded to ``all_reduce``.
      fp16: use float16 inputs (with a looser comparison tolerance).
      instance_key: collective instance key shared by all devices.
      merge_op: per-element merge op name.
      final_op: final op name applied after merging.
      timeout: per-op timeout in seconds; 0 disables it.
      reported_group_size: group size passed to the op; tests deliberately
        over-report it to force a timeout. Defaults to the real group size.
    """
    group_key = 1
    group_size = len(inputs)
    if reported_group_size is None:
      reported_group_size = group_size
    device_type = 'CPU'
    config = config_pb2.ConfigProto(device_count={device_type: group_size})
    devices = ['/{}:{}'.format(device_type, i) for i in range(group_size)]
    with self.session(config=config) as sess:
      colred = []
      for i in range(group_size):
        with ops.device(devices[i]):
          tensor = constant_op.constant(inputs[i], dtype=(
              dtypes.float16 if fp16 else dtypes.float32))
          colred.append(
              collective_ops.all_reduce(
                  tensor,
                  reported_group_size,
                  group_key,
                  instance_key,
                  merge_op,
                  final_op,
                  communication_hint=communication_hint,
                  timeout=timeout))
      run_options = config_pb2.RunOptions()
      if set_graph_key:
        run_options.experimental.collective_graph_key = 1
      results = sess.run(colred, options=run_options)
    # fp16 accumulates more rounding error, so compare more loosely.
    tolerance = 1e-3 if fp16 else 1e-5
    for i in range(group_size):
      logging.info('i {} result {} expected {}'.format(i, results[i], expected))
      self.assertAllClose(results[i], expected, rtol=tolerance, atol=tolerance)
  def _testMultipleConcurrentCollectiveReduce(self, t0, t1, expected):
    """Launch two all-reduce instances per device and verify every result.

    Deterministic sequential execution is enabled so the concurrent
    instances execute in the same order on each device.
    """
    group_key = 1
    group_size = 2
    num_instances = 2
    all_reduces = []
    config = config_pb2.ConfigProto(device_count={'CPU': group_size})
    config.experimental.collective_deterministic_sequential_execution = True
    with self.session(config=config) as sess:
      for cpu in range(group_size):
        with ops.device('/CPU:%d' % cpu):
          # Device 0 contributes t0, device 1 contributes t1.
          in_tensor = constant_op.constant(t0 if cpu == 0 else t1)
          for instance in range(num_instances):
            all_reduces.append(collective_ops.all_reduce(
                in_tensor, group_size, group_key, instance, 'Add', 'Div'))
      results = sess.run(all_reduces)
    # Every instance on every device must produce the same reduced value.
    for i in range(group_size * num_instances):
      self.assertAllClose(results[i], expected, rtol=1e-5, atol=1e-5)
def testCollectiveReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=True)
def testCollectiveAutoGraphKey(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=False)
def testFp16Reduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=True,
fp16=True)
def testCollectiveMultipleConcurrentReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testMultipleConcurrentCollectiveReduce(
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2])
def testCollectiveTimeoutV1(self):
timeout = 4.5
kwargs = dict(
inputs=[[i + j + 0.1 for i in range(8)] for j in range(3)],
expected=[1 + i + 0.1 for i in range(8)],
set_graph_key=True,
timeout=timeout)
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(**kwargs)
start_time = time.time()
with ops.Graph().as_default():
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
self._testCollectiveReduce(
reported_group_size=len(kwargs['inputs']) + 1, **kwargs)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testNcclHintFallbackToRingReduce(self):
"""Tests that setting `communication_hint=nccl` works on non-GPU builds."""
if kernels.get_registered_kernels_for_op('NcclAllReduce'):
self.skipTest('Run only on non-GPU environments')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
set_graph_key=False,
communication_hint='nccl')
def _testWhile(self, num_vars, num_iterations, key_base):
group_size = 2
group_key = 1
instances = [(key_base + i) for i in range(num_vars)]
devices = ['CPU:{}'.format(i) for i in range(group_size)]
config = config_pb2.ConfigProto(device_count={'CPU': group_size})
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
with self.session(config=config) as sess:
loop_vars = []
for device in devices:
with ops.device(device):
loop_vars.append(
[variable_v1.VariableV1((1 << i) * 1.) for i in range(num_vars)])
# This variable controls number of iterations.
loop_vars.append(variable_v1.VariableV1(0.))
def loop_body(dev0_tensors, dev1_tensors, loop_tensor):
return_ops = []
for i in range(len(devices)):
device = devices[i]
device_tensors = dev0_tensors if i == 0 else dev1_tensors
with ops.device(device):
device_collectives = []
for j in range(num_vars):
# NOTE(ayushd): we need the `cast` here to ensure that the input
# to `all_reduce` has an explicit device string. We don't use
# `identity` because `cast` is more resilient to getting optimized
# away by various optimization passes.
input_tensor = math_ops.cast(device_tensors[j], dtypes.float16)
collective_op = collective_ops.all_reduce(
input_tensor, group_size, group_key, instances[j],
'Add', 'Id')
output_tensor = math_ops.cast(collective_op, dtypes.float32)
device_collectives.append(output_tensor)
return_ops.append(device_collectives)
return_ops.append(math_ops.add(loop_tensor, 1.))
return return_ops
# Run until last variable exceeds number of iterations.
loop_cond = lambda d0, d1, i: math_ops.less(i, num_iterations)
sess.run(variables.global_variables_initializer())
results = sess.run(while_loop.while_loop(loop_cond, loop_body, loop_vars))
self.assertEqual(results[:-1], [
[((1 << (num_iterations + v)) * 1.) for v in range(num_vars)]
for _ in range(group_size)])
def testSimpleWhile(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testWhile(num_vars=1, num_iterations=4, key_base=20)
def testWhileMultipleAllReduce(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testWhile(num_vars=2, num_iterations=4, key_base=20)
def testWhileWithScopedAllocator(self):
group_size = 2
group_key = 1
instance_key0 = 1
instance_key1 = 2
config = config_pb2.ConfigProto(device_count={'CPU': group_size})
rewrite_options = config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(config=config) as sess:
run_ops = []
for i in range(group_size):
with ops.device('CPU:%d' % i):
constant = constant_op.constant(0.)
cond = lambda i: math_ops.less(i, 10.)
body = lambda i: math_ops.add(i, 1.)
input0 = while_loop.while_loop(cond, body, [constant])
input1 = math_ops.add(constant, 5)
colred0 = collective_ops.all_reduce(input0, group_size, group_key,
instance_key0, 'Add', 'Id')
colred1 = collective_ops.all_reduce(input1, group_size, group_key,
instance_key1, 'Add', 'Id')
run_ops.append(math_ops.add_n([colred0, colred1]))
results = sess.run(run_ops)
self.assertEqual(results, [30., 30.])
def testCollectiveReduceScalar(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(inputs=[0.1, 0.3], expected=0.2,
set_graph_key=True)
def testCollectiveReduceMaximum(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
expected=[10., 20., 30., 40., 50.],
set_graph_key=True,
instance_key=30,
merge_op='Max',
final_op='Id')
def testCollectiveReduceMinimum(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveReduce(
inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
expected=[1., 2., 3., 4., 5.],
set_graph_key=True,
instance_key=40,
merge_op='Min',
final_op='Id')
def _testCollectiveBroadcast(self, in_val):
group_key = 1
instance_key = 1
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(in_val)
out0 = collective_ops.broadcast_send(in0, in0.shape, in0.dtype,
2, group_key, instance_key)
with ops.device('/CPU:1'):
c1 = constant_op.constant(in_val)
out1 = collective_ops.broadcast_recv(c1.shape, c1.dtype,
2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
results = sess.run([out0, out1], options=run_options)
self.assertAllClose(results[0], in_val, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], in_val, rtol=1e-5, atol=1e-5)
def testCollectiveBroadcast(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveBroadcast([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1])
def testCollectiveBroadcastBool(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveBroadcast([True, False])
def _testCollectiveGather(self, t0, t1, expected, set_graph_key):
group_key = 1
instance_key = 1
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
if set_graph_key:
run_options.experimental.collective_graph_key = 1
results = sess.run([c0, c1], options=run_options)
self.assertAllClose(results[0], expected, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], expected, rtol=1e-5, atol=1e-5)
def testCollectiveGather(self):
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
self._testCollectiveGather([0, 1, 2, 3, 4, 5, 6, 7],
[10, 11, 12, 13, 14, 15, 16, 17],
[0, 1, 2, 3, 4, 5, 6, 7,
10, 11, 12, 13, 14, 15, 16, 17],
True)
self._testCollectiveGather([[0, 1, 2, 3], [4, 5, 6, 7]],
[[10, 11, 12, 13], [14, 15, 16, 17]],
[[0, 1, 2, 3], [4, 5, 6, 7],
[10, 11, 12, 13], [14, 15, 16, 17]],
True)
self._testCollectiveGather([[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
[[[10, 11], [12, 13]], [[14, 15], [16, 17]]],
[[[0, 1], [2, 3]], [[4, 5], [6, 7]],
[[10, 11], [12, 13]], [[14, 15], [16, 17]]],
True)
def testCollectiveGatherShapeMismatch(self):
group_key = 1
instance_key = 1
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
t2 = [9, 10]
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
in2 = constant_op.constant(t2)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
c2 = collective_ops.all_gather(in2, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
sess.run([c0, c1], options=run_options)
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Shape mismatch'):
sess.run([c0, c2], options=run_options)
def testCollectiveGatherShapeMismatchAcrossDevices(self):
group_key = 1
instance_key = 1
t0 = [1, 2, 3, 4]
t1 = [5, 6]
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 1
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Shape mismatch'):
sess.run([c0, c1], options=run_options)
def testCollectiveGatherPolymorphicShape(self):
t0 = [0, 1, 2, 3, 4, 5, 6, 7]
t1 = [10, 11, 12, 13, 14, 15, 16, 17]
group_size = 2
group_key = 1
instance_key = 123
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(
config=config_pb2.ConfigProto(
device_count={'CPU': group_size})) as sess:
with ops.device('/CPU:0'):
in0 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
c0 = collective_ops.all_gather(in0, group_size, group_key,
instance_key)
with ops.device('/CPU:1'):
in1 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
c1 = collective_ops.all_gather(in1, group_size, group_key,
instance_key)
results = sess.run([c0, c1], feed_dict={in0: t0, in1: t1})
results_ = sess.run([c0, c1], feed_dict={in0: t0[1:], in1: t1[1:]})
expected_output = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17]
self.assertAllClose(results[0], expected_output, rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], expected_output, rtol=1e-5, atol=1e-5)
expected_output_ = [1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17]
self.assertAllClose(results_[0], expected_output_, rtol=1e-5, atol=1e-5)
self.assertAllClose(results_[1], expected_output_, rtol=1e-5, atol=1e-5)
@test_util.run_v2_only
@test_util.disable_tfrt(
'b/177270918: TFRT has dead lock when executing collective ops.')
def testCollectiveGroupSizeMismatch(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
@test_util.run_v2_only
def testCollectiveGatherShapeCheckFailure(self):
with self.assertRaisesRegex(errors.InvalidArgumentError,
'input should have rank > 0'):
collective_ops.gen_collective_ops.CollectiveGather(
input=1,
group_size=1,
group_key=1,
instance_key=1,
shape=(3, 3, 3),
communication_hint='auto',
timeout_seconds=0,
name='')
@def_function.function
def run_all_reduce():
group_key = 10
instance_key = 20
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(
in0, group_size=2, group_key=group_key, instance_key=instance_key,
merge_op='Add', final_op='Id')
with ops.device('/CPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(
in1, group_size=3, group_key=group_key, instance_key=instance_key,
merge_op='Add', final_op='Id')
return c0, c1
with self.assertRaisesRegex(errors.InternalError,
'but that group has size'):
run_all_reduce()
@test_util.run_v2_only
def testCollectiveTensorsHaveNoDeviceSpecified(self):
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
group_size = 2
group_key = 1
instance_key = 1
@def_function.function
def fn(all_args):
results = []
# The inputs have no devices set. This is expected to be a trace-time
# check only.
self.assertEqual(all_args[0].device, '')
self.assertEqual(all_args[1].device, '')
with ops.device('/CPU:0'):
results.append(
collective_ops.all_reduce(all_args[0], group_size, group_key,
instance_key, 'Add', 'Div'))
with ops.device('/CPU:1'):
results.append(
collective_ops.all_reduce(all_args[1], group_size, group_key,
instance_key, 'Add', 'Div'))
return results
with ops.device('/CPU:0'):
in0 = constant_op.constant(1)
with ops.device('/CPU:1'):
in1 = constant_op.constant(3)
result = fn([in0, in1])
self.assertAllClose(result, [2, 2])
def testConstantWithScopedAllocator(self):
group_size = 2
group_key = 1
instance_key1 = 1
instance_key2 = 2
graph_options = config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(do_constant_folding=True))
cfg = config_pb2.ConfigProto(device_count={'CPU': group_size},
graph_options=graph_options)
rewrite_options = cfg.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
# Tests that execute collectives need to be enclosed in graph or tf.function
with ops.Graph().as_default():
with self.session(config=cfg) as sess:
run_ops = []
for i in range(group_size):
with ops.device('CPU:%d' % i):
constant = constant_op.constant(i + 1.)
input_tensor1 = array_ops.identity(constant)
input_tensor2 = array_ops.identity(constant)
reduced_tensor1 = collective_ops.all_reduce(
input_tensor1, group_size, group_key, instance_key1, 'Add',
'Id')
reduced_tensor2 = collective_ops.all_reduce(
input_tensor2, group_size, group_key, instance_key2, 'Add',
'Id')
run_ops.append(array_ops.identity(reduced_tensor1))
run_ops.append(array_ops.identity(reduced_tensor2))
results = sess.run(run_ops)
self.assertEqual(results, [3., 3., 3., 3.])
if __name__ == '__main__':
test.main()
| CollectiveOpTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 18260,
"end": 18685
} | class ____(FastIntFlag):
"""bitflag enum indicating styles of PK defaults
which can work as implicit sentinel columns
"""
NOT_SUPPORTED = 1
AUTOINCREMENT = 2
IDENTITY = 4
SEQUENCE = 8
ANY_AUTOINCREMENT = AUTOINCREMENT | IDENTITY | SEQUENCE
_SUPPORTED_OR_NOT = NOT_SUPPORTED | ANY_AUTOINCREMENT
USE_INSERT_FROM_SELECT = 16
RENDER_SELECT_COL_CASTS = 64
| InsertmanyvaluesSentinelOpts |
python | gevent__gevent | src/gevent/_config.py | {
"start": 10096,
"end": 10663
} | class ____(FloatSettingMixin, Setting):
document = True
name = 'threadpool_idle_task_timeout'
environment_key = 'GEVENT_THREADPOOL_IDLE_TASK_TIMEOUT'
desc = """\
How long threads in the default threadpool (used for
DNS by default) are allowed to be idle before exiting.
Use -1 for no timeout.
.. versionadded:: 22.08.0
"""
# This value is picked pretty much arbitrarily.
# We want to balance performance (keeping threads around)
# with memory/cpu usage (letting threads go).
default = 5.0
| ThreadpoolIdleTaskTimeout |
python | aimacode__aima-python | agents.py | {
"start": 24785,
"end": 26133
} | class ____(XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=10, height=10):
super().__init__(width, height)
self.add_walls()
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
bump = ('Bump' if agent.bump else 'None')
return status, bump
def execute_action(self, agent, action):
agent.bump = False
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super().execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
| VacuumEnvironment |
python | sympy__sympy | sympy/physics/mechanics/tests/test_wrapping_geometry.py | {
"start": 9790,
"end": 15151
} | class ____:
@staticmethod
def test_valid_constructor():
N = ReferenceFrame('N')
alpha, apex, axis = Symbol('alpha'), Point('p0'), N.z
cone = WrappingCone(alpha, apex, axis)
assert isinstance(cone, WrappingCone)
assert hasattr(cone, 'alpha')
assert cone.alpha == alpha
assert hasattr(cone, 'apex')
assert cone.apex == apex
assert hasattr(cone, 'axis')
assert cone.axis == axis
@staticmethod
@pytest.mark.parametrize(
'position, expected',
[
(S.Zero, Eq(0, 0, evaluate=False)),
(N.x + N.y + N.z, Eq(2, Rational(1, 3), evaluate=False)),
(N.x + sqrt(3) * N.z, Eq(1, 1, evaluate=False)),
(N.y + sqrt(3) * N.z, Eq(1, 1, evaluate=False)),
((N.x + N.y) / sqrt(2) + sqrt(3) * N.z, Eq(1, 1, evaluate=False)),
(N.x / sqrt(3) + sqrt(2) * N.y / sqrt(3) + sqrt(3) * N.z, Eq(1, 1, evaluate=False)),
(2 * N.x + sqrt(12) * N.z, Eq(4, 4, evaluate=False)),
(2 * N.y + sqrt(12) * N.z, Eq(4, 4, evaluate=False)),
(5 * N.x + sqrt(12) * N.z, Eq(25, 4, evaluate=False)),
(sqrt(2) * (N.x + N.y) + sqrt(12) * N.z, Eq(4, 4, evaluate=False))
]
)
def test_point_on_surface(position, expected):
axis = N.z
apex = Point('p0')
alpha = pi/6
cone = WrappingCone(alpha, apex, axis)
p1 = Point('p1')
p1.set_pos(apex, position)
assert cone.point_on_surface(p1) == expected
@staticmethod
@pytest.mark.parametrize(
'axis, alpha, position_1, position_2, expected',
[
(N.z, pi/4, (N.x + N.z)/sqrt(2), (N.y + N.z)/sqrt(2), sqrt(2 - 2*cos(pi/(2*sqrt(2))))),
(N.z, pi/6, N.x/sqrt(3) + N.z, N.y/sqrt(3) + N.z, sqrt(Rational(8, 3) - 4*sqrt(2)/3)),
(N.z, pi/4, (N.x + N.z)/sqrt(2), (2*N.x + 2*N.z)/sqrt(2), 1),
(N.z, pi/4, (N.x + N.z)/sqrt(2), (-N.x + N.z)/sqrt(2), sqrt(2 - 2*cos(pi/sqrt(2)))),
(N.x, pi/3, (N.y + N.x)/2, (N.z + N.x)/2, sqrt(2 - 2*cos(pi*sqrt(3)/4))),
(N.z, pi/6, (N.x + N.z*sqrt(3))/2, (N.y + N.z*sqrt(3))/2, sqrt(2 - sqrt(2))),
(N.z, pi/3, (N.x*sqrt(3) + N.z)/2, (N.y*sqrt(3) + N.z)/2, sqrt(2 - 2*cos(pi*sqrt(3)/4))),
(N.z, pi/4, (N.x + N.z)/sqrt(11), (N.x + N.z)/sqrt(11), 0),
(N.z, pi/6, N.x/sqrt(3) + N.z, 2*N.y/sqrt(3) + 2*N.z, sqrt(Rational(20, 3) - 8*sqrt(2)/3)),
(N.z, pi/6, (N.x + N.y)/(sqrt(2)*sqrt(3)) + N.z, (3*N.x - 3*N.y)/(sqrt(2)*sqrt(3)) + 3*N.z, sqrt(Rational(40, 3) - 4*sqrt(2))),
(N.z, pi/4, (cos(pi/6)*N.x + sin(pi/6)*N.y + N.z)/sqrt(2), (3*cos(2*pi/3)*N.x + 3*sin(2*pi/3)*N.y + 3*N.z)/sqrt(2), sqrt(10 - 6*cos(pi/(2*sqrt(2))))),
(N.z, pi/4, (N.x + N.z)/sqrt(2), (2*N.y + 2*N.z)/sqrt(2), sqrt(5 - 4*cos(pi/(2*sqrt(2))))),
]
)
def test_geodesic_length(axis, alpha, position_1, position_2, expected):
apex = Point('p0')
cone = WrappingCone(alpha, apex, axis)
p1 = Point('p1')
p1.set_pos(apex, position_1)
p2 = Point('p2')
p2.set_pos(apex, position_2)
assert cone.geodesic_length(p1, p2) == expected
@staticmethod
@pytest.mark.parametrize(
'axis, alpha, position_1, position_2',
[
(N.z, pi/4, N.x + N.z, N.y + N.z),
(N.z, pi/6, N.x/sqrt(3) + N.z, 2*N.x/sqrt(3) + 2*N.z),
(N.x, pi/4, N.y + N.x, N.z + 2*N.x),
(N.z, pi/6, N.x/sqrt(3) + N.z, 2*N.y/sqrt(3) + 2*N.z),
]
)
def test_geodesic_end_vectors(axis, alpha, position_1, position_2):
apex = Point('p0')
cone = WrappingCone(alpha, apex, axis)
p1 = Point('p1'); p1.set_pos(apex, position_1)
p2 = Point('p2'); p2.set_pos(apex, position_2)
v1_calc, v2_calc = cone.geodesic_end_vectors(p1, p2)
pos1 = p1.pos_from(apex)
pos2 = p2.pos_from(apex)
z1 = pos1.dot(axis)
z2 = pos2.dot(axis)
s1 = z1 / cos(alpha)
s2 = z2 / cos(alpha)
L = cone.geodesic_length(p1, p2)
n1 = (pos1 - z1*axis).normalize()
n2 = (pos2 - z2*axis).normalize()
central = acos(cancel(n1.dot(n2)))
delta_u = central * sin(alpha)
g1 = pos1.normalize()
c1 = axis.cross(n1)
g2 = pos2.normalize()
c2 = axis.cross(n2)
v1_radial_comp = (s2 * cos(delta_u) - s1) / L
v1_circ_comp = (s2 * sin(delta_u)) / L
expected_v1 = v1_radial_comp * g1 + v1_circ_comp * c1
v2_radial_comp = (s1 * cos(delta_u) - s2) / L
v2_circ_comp = (-s1 * sin(delta_u)) / L
expected_v2 = v2_radial_comp * g2 + v2_circ_comp * c2
assert v1_calc == expected_v1
assert v2_calc == expected_v2
@staticmethod
@pytest.mark.parametrize(
'position',
[
(N.x + N.z),
(N.y/sqrt(3) + 2*N.z),
]
)
def test_geodesic_end_vectors_invalid_coincident(position):
apex = Point('p0')
cone = WrappingCone(pi/6, apex, N.z)
p1 = Point('p1')
p1.set_pos(apex, position)
with pytest.raises(ValueError,
match='No unique geodesic exists for coincident points'):
cone.geodesic_end_vectors(p1, p1)
| TestWrappingCone |
python | getsentry__sentry | tests/sentry/integrations/slack/test_link_team.py | {
"start": 8124,
"end": 13484
} | class ____(SlackIntegrationLinkTeamTestBase):
def setUp(self) -> None:
super().setUp()
self.link_team()
self.url = build_team_unlinking_url(
integration=self.integration,
organization_id=self.organization.id,
slack_id=self.external_id,
channel_id=self.channel_id,
channel_name=self.channel_name,
response_url=self.response_url,
)
@pytest.fixture(autouse=True)
def mock_chat_postMessage(self):
with patch(
"slack_sdk.web.WebClient.chat_postMessage",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/chat.postMessage",
req_args={},
data={"ok": True},
headers={},
status_code=200,
),
) as self.mock_post:
yield
def test_unlink_team(self) -> None:
"""Test that a team can be unlinked from a Slack channel"""
response = self.get_success_response()
self.assertTemplateUsed(response, "sentry/integrations/slack/unlink-team.html")
response = self.get_success_response(data={})
self.assertTemplateUsed(response, "sentry/integrations/slack/unlinked-team.html")
external_actors = self.get_linked_teams()
assert len(external_actors) == 0
assert self.mock_post.call_count == 1
text = self.mock_post.call_args.kwargs["text"]
assert (
f"This channel will no longer receive issue alert notifications for the {self.team.slug} team."
in text
)
with assume_test_silo_mode(SiloMode.CONTROL):
team_settings = NotificationSettingProvider.objects.filter(team_id=self.team.id)
assert len(team_settings) == 0
def test_unlink_team_valid_through_team_admin(self) -> None:
"""Test that a team can be unlinked from a Slack channel as a valid team admin"""
self._create_user_valid_through_team_admin()
self.test_unlink_team()
def test_unlink_multiple_teams(self) -> None:
"""
Test that if you have linked multiple teams to a single channel, when
you type `/sentry unlink team`, we unlink all teams from that channel.
This should only apply to the one organization who did this before we
blocked users from doing so.
"""
team2 = self.create_team(organization=self.organization, name="Team Hellboy")
self.link_team(team2)
external_actors = self.get_linked_teams([self.team.id, team2.id])
assert len(external_actors) == 2
response = self.get_success_response()
self.assertTemplateUsed(response, "sentry/integrations/slack/unlink-team.html")
response = self.get_success_response(data={})
self.assertTemplateUsed(response, "sentry/integrations/slack/unlinked-team.html")
external_actors = self.get_linked_teams([self.team.id, team2.id])
assert len(external_actors) == 0
assert self.mock_post.call_count >= 1
text = self.mock_post.call_args_list[0].kwargs["text"]
assert (
f"This channel will no longer receive issue alert notifications for the {self.team.slug} team."
in text
)
with assume_test_silo_mode(SiloMode.CONTROL):
team_settings = NotificationSettingProvider.objects.filter(team_id=self.team.id)
assert len(team_settings) == 0
def test_unlink_team_multiple_organizations(self) -> None:
# Create another organization and team for this user that is linked through `self.integration`.
organization2 = self.create_organization(owner=self.user)
team2 = self.create_team(organization=organization2, members=[self.user])
with assume_test_silo_mode(SiloMode.CONTROL):
self.create_organization_integration(
organization_id=organization2.id, integration=self.integration
)
self.link_team(team2)
# Team order should not matter.
for team in (self.team, team2):
external_actors = self.get_linked_teams(
organization=team.organization, team_ids=[team.id]
)
assert len(external_actors) == 1
# Override the URL.
self.url = build_team_unlinking_url(
integration=self.integration,
organization_id=team.organization.id,
slack_id=self.external_id,
channel_id=self.channel_id,
channel_name=self.channel_name,
response_url=self.response_url,
)
response = self.get_success_response(data={})
self.assertTemplateUsed(response, "sentry/integrations/slack/unlinked-team.html")
external_actors = self.get_linked_teams(
organization=team.organization, team_ids=[team.id]
)
assert len(external_actors) == 0
def test_unlink_team_invalid_method(self) -> None:
"""Test for an invalid method response"""
response = self.client.put(self.url, content_type="application/x-www-form-urlencoded")
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
| SlackIntegrationUnlinkTeamTestWithSdk |
python | jazzband__django-waffle | waffle/testutils.py | {
"start": 304,
"end": 1079
} | class ____(TestContextDecorator, Generic[_T]):
def __init__(self, name: str, active: _T):
super().__init__()
self.name = name
self.active = active
def get(self) -> None:
self.obj, self.created = self.cls.objects.get_or_create(name=self.name)
def update(self, active: _T) -> None:
raise NotImplementedError
def get_value(self) -> _T:
raise NotImplementedError
def enable(self) -> None:
self.get()
self.old_value = self.get_value()
if self.old_value != self.active:
self.update(self.active)
def disable(self) -> None:
if self.created:
self.obj.delete()
self.obj.flush()
else:
self.update(self.old_value)
| _overrider |
python | pytorch__pytorch | test/mobile/lightweight_dispatch/tests_setup.py | {
"start": 3425,
"end": 4547
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.ops = torch.nn.Sequential(
torch.nn.ReLU(),
torch.nn.Flatten(),
)
def forward(self, x):
x[1] = -2
return self.ops(x)
if __name__ == "__main__":
command = sys.argv[1]
ops_yaml = sys.argv[2]
backup = ops_yaml + ".bak"
if command == "setup":
tests = [
ModelWithDTypeDeviceLayoutPinMemory(),
ModelWithTensorOptional(),
ModelWithScalarList(),
ModelWithFloatList(),
ModelWithListOfOptionalTensors(),
ModelWithArrayOfInt(),
ModelWithTensors(),
ModelWithStringOptional(),
ModelWithMultipleOps(),
]
shutil.copyfile(ops_yaml, backup)
with open(ops_yaml, "a") as f:
for op in _OPERATORS:
f.write(f"- {op}\n")
elif command == "shutdown":
for file in _MODELS:
if os.path.isfile(file):
os.remove(file)
shutil.move(backup, ops_yaml)
| ModelWithMultipleOps |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_addition.py | {
"start": 8293,
"end": 9816
} | class ____(metaclass=abc.ABCMeta):
"""Abstract base class to add two operators.
Each `Adder` acts independently, adding everything it can, paying no attention
as to whether another `Adder` could have done the addition more efficiently.
"""
@property
def name(self):
return self.__class__.__name__
@abc.abstractmethod
def can_add(self, op1, op2):
"""Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`."""
pass
@abc.abstractmethod
def _add(self, op1, op2, operator_name, hints):
# Derived classes can assume op1 and op2 have been validated, e.g. they have
# the same dtype, and their domain/range dimensions match.
pass
def add(self, op1, op2, operator_name, hints=None):
"""Return new `LinearOperator` acting like `op1 + op2`.
Args:
op1: `LinearOperator`
op2: `LinearOperator`, with `shape` and `dtype` such that adding to
`op1` is allowed.
operator_name: `String` name to give to returned `LinearOperator`
hints: `_Hints` object. Returned `LinearOperator` will be created with
these hints.
Returns:
`LinearOperator`
"""
updated_hints = _infer_hints_allowing_override(op1, op2, hints)
if operator_name is None:
operator_name = "Add/" + op1.name + "__" + op2.name + "/"
scope_name = self.name
if scope_name.startswith("_"):
scope_name = scope_name[1:]
with ops.name_scope(scope_name):
return self._add(op1, op2, operator_name, updated_hints)
| _Adder |
python | python-visualization__folium | folium/map.py | {
"start": 18804,
"end": 20432
} | class ____(MacroElement):
"""
Create a tooltip that shows text when hovering over its parent object.
Parameters
----------
text: str
String to display as a tooltip on the object. If the argument is of a
different type it will be converted to str.
style: str, default None.
HTML inline style properties like font and colors. Will be applied to
a div with the text in it.
sticky: bool, default True
Whether the tooltip should follow the mouse.
**kwargs
These values will map directly to the Leaflet Options. More info
available here: https://leafletjs.com/reference.html#tooltip
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
{{ this._parent.get_name() }}.bindTooltip(
`<div{% if this.style %} style={{ this.style|tojson }}{% endif %}>
{{ this.text }}
</div>`,
{{ this.options|tojavascript }}
);
{% endmacro %}
"""
)
def __init__(
self,
text: str,
style: Optional[str] = None,
sticky: bool = True,
**kwargs: TypeJsonValue,
):
super().__init__()
self._name = "Tooltip"
self.text = str(text)
kwargs.update({"sticky": sticky})
self.options = remove_empty(**kwargs)
if style:
assert isinstance(
style, str
), "Pass a valid inline HTML style property string to style."
# noqa outside of type checking.
self.style = style
| Tooltip |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/unions3.py | {
"start": 688,
"end": 891
} | class ____(metaclass=Metaclass2):
pass
def requires_class_with_meta1(val: Type[ClassWithMeta1]):
pass
MetaOr1 = ClassWithMeta1 | ClassWithNoMeta1
requires_class_with_meta1(MetaOr1)
| ClassWithMeta2 |
python | getsentry__sentry | src/sentry/replays/lib/eap/snuba_transpiler.py | {
"start": 31428,
"end": 31561
} | class ____(TypedDict):
downsampling_mode: QueryResultMetaDownsamplingMode
next_offset: int
request_id: str
| QueryResultMeta |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassHash1.py | {
"start": 591,
"end": 712
} | class ____:
a: int
def __hash__(self) -> int:
return 0
v6: Hashable = DC6(0)
@dataclass(frozen=True)
| DC6 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/_util_cy.py | {
"start": 2217,
"end": 3865
} | class ____(Dict[_AM_KEY, _AM_VALUE]):
"""A map that creates new keys for missing key access.
Produces an incrementing sequence given a series of unique keys.
This is similar to the compiler prefix_anon_map class although simpler.
Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which
is otherwise usually used for this type of operation.
"""
if cython.compiled:
_index: cython.uint
def __cinit__(self): # type: ignore[no-untyped-def]
self._index = 0
else:
_index: int = 0 # type: ignore[no-redef]
@cython.cfunc
@cython.inline
def _add_missing(self: anon_map, key: _AM_KEY, /) -> int:
val: int = self._index
self._index += 1
self_dict: dict = self # type: ignore[type-arg]
self_dict[key] = val
return val
def get_anon(self: anon_map, obj: object, /) -> Tuple[int, bool]:
self_dict: dict = self # type: ignore[type-arg]
idself: int = _get_id(obj)
if idself in self_dict:
return self_dict[idself], True
else:
return self._add_missing(idself), False
if cython.compiled:
def __getitem__(self: anon_map, key: _AM_KEY, /) -> _AM_VALUE:
self_dict: dict = self # type: ignore[type-arg]
if key in self_dict:
return self_dict[key] # type:ignore[no-any-return]
else:
return self._add_missing(key) # type:ignore[no-any-return]
def __missing__(self: anon_map, key: _AM_KEY, /) -> int:
return self._add_missing(key) # type:ignore[no-any-return]
| anon_map |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 63866,
"end": 68152
} | class ____(_LoopTypesTester):
_ufuncs = supported_ufuncs[:]
_ufuncs.remove(np.divmod) # not implemented yet
# NOTE: the full list of ufuncs supporting datetime64 and timedelta64
# types in Numpy is:
# ['absolute', 'add', 'divide', 'equal', 'floor_divide', 'fmax', 'fmin',
# 'greater', 'greater_equal', 'less', 'less_equal', 'maximum',
# 'minimum', 'multiply', 'negative', 'not_equal', 'sign', 'subtract',
# 'true_divide']
# Test datetime64 and timedelta64 types.
_required_types = 'mM'
# Test various units combinations (TestLoopTypes is only able to test
# homogeneous units).
def test_add(self):
ufunc = np.add
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[s]'])
# heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc,
['m8[m]', 'm8[s]', 'm8[m]'])
def test_subtract(self):
ufunc = np.subtract
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[s]'])
# heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc,
['M8[m]', 'M8[s]', 'm8[m]'])
def test_multiply(self):
ufunc = np.multiply
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[us]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['q', 'm8[s]', 'm8[us]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_true_divide(self):
ufunc = np.true_divide
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'd'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'd'])
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_floor_divide(self):
ufunc = np.floor_divide
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def _check_comparison(self, ufunc):
fn = _make_ufunc_usecase(ufunc)
# timedelta
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', '?'])
# datetime
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', '?'])
def test_comparisons(self):
for ufunc in [np.equal, np.not_equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
self._check_comparison(ufunc)
TestLoopTypesDatetime.autogenerate()
| TestLoopTypesDatetime |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 418,
"end": 797
} | class ____(_BaseSpiderMiddleware):
def process_spider_exception(self, response, exception):
self.crawler.spider.logger.info(
"Middleware: %s exception caught", exception.__class__.__name__
)
# ================================================================================
# (0) recover from an exception on a spider callback
| LogExceptionMiddleware |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/llama_index/vector_stores/bigquery/base.py | {
"start": 1016,
"end": 1173
} | class ____(BaseModel):
node_id: str
embedding: List[float]
text: str
metadata: Dict[str, Any]
distance: Optional[float] = None
| _BigQueryRow |
python | sqlalchemy__sqlalchemy | test/orm/test_composites.py | {
"start": 41376,
"end": 49108
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"edge",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("x1", Integer),
Column("y1", Integer),
Column("x2", Integer),
Column("y2", Integer),
)
@classmethod
def setup_mappers(cls):
class Point(cls.Comparable):
def __init__(self, x, y):
self.x = x
self.y = y
def __composite_values__(self):
return [self.x, self.y]
def __eq__(self, other):
return (
isinstance(other, Point)
and other.x == self.x
and other.y == self.y
)
def __ne__(self, other):
return not isinstance(other, Point) or not self.__eq__(other)
class Edge(cls.Comparable):
pass
def _test_roundtrip(self, *, assert_deferred=False, options=()):
Edge, Point = self.classes.Edge, self.classes.Point
e1 = Edge(start=Point(3, 4), end=Point(5, 6))
sess = fixture_session()
sess.add(e1)
sess.commit()
stmt = select(Edge)
if options:
stmt = stmt.options(*options)
e1 = sess.execute(stmt).scalar_one()
names = ["start", "end", "x1", "x2", "y1", "y2"]
for name in names:
if assert_deferred:
assert name not in e1.__dict__
else:
assert name in e1.__dict__
eq_(e1, Edge(start=Point(3, 4), end=Point(5, 6)))
def test_columns(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(Point, edge.c.x1, edge.c.y1),
"end": sa.orm.composite(Point, edge.c.x2, edge.c.y2),
},
)
self._test_roundtrip()
def test_attributes(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
m = self.mapper_registry.map_imperatively(Edge, edge)
m.add_property("start", sa.orm.composite(Point, Edge.x1, Edge.y1))
m.add_property("end", sa.orm.composite(Point, Edge.x2, Edge.y2))
self._test_roundtrip()
def test_strings(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
m = self.mapper_registry.map_imperatively(Edge, edge)
m.add_property("start", sa.orm.composite(Point, "x1", "y1"))
m.add_property("end", sa.orm.composite(Point, "x2", "y2"))
self._test_roundtrip()
def test_deferred_config(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(
Point, edge.c.x1, edge.c.y1, deferred=True, group="s"
),
"end": sa.orm.composite(
Point, edge.c.x2, edge.c.y2, deferred=True
),
},
)
self._test_roundtrip(assert_deferred=True)
def test_defer_option_on_cols(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(
Point,
edge.c.x1,
edge.c.y1,
),
"end": sa.orm.composite(
Point,
edge.c.x2,
edge.c.y2,
),
},
)
self._test_roundtrip(
assert_deferred=True,
options=(
defer(Edge.x1),
defer(Edge.x2),
defer(Edge.y1),
defer(Edge.y2),
),
)
def test_defer_option_on_composite(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(
Point,
edge.c.x1,
edge.c.y1,
),
"end": sa.orm.composite(
Point,
edge.c.x2,
edge.c.y2,
),
},
)
self._test_roundtrip(
assert_deferred=True, options=(defer(Edge.start), defer(Edge.end))
)
@testing.variation("composite_only", [True, False])
def test_load_only_option_on_composite(self, composite_only):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(
Point, edge.c.x1, edge.c.y1, deferred=True
),
"end": sa.orm.composite(
Point,
edge.c.x2,
edge.c.y2,
),
},
)
if composite_only:
self._test_roundtrip(
assert_deferred=False,
options=(load_only(Edge.start, Edge.end),),
)
else:
self._test_roundtrip(
assert_deferred=False,
options=(load_only(Edge.start, Edge.x2, Edge.y2),),
)
def test_defer_option_on_composite_via_group(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(
Point, edge.c.x1, edge.c.y1, deferred=True, group="s"
),
"end": sa.orm.composite(
Point, edge.c.x2, edge.c.y2, deferred=True
),
},
)
self._test_roundtrip(
assert_deferred=False,
options=(undefer_group("s"), undefer(Edge.end)),
)
def test_check_prop_type(self):
edge, Edge, Point = (
self.tables.edge,
self.classes.Edge,
self.classes.Point,
)
self.mapper_registry.map_imperatively(
Edge,
edge,
properties={
"start": sa.orm.composite(Point, (edge.c.x1,), edge.c.y1)
},
)
assert_raises_message(
sa.exc.ArgumentError,
# note that we also are checking that the tuple
# renders here, so the "%" operator in the string needs to
# apply the tuple also
r"Composite expects Column objects or mapped "
r"attributes/attribute names as "
r"arguments, got: \(Column",
configure_mappers,
)
| ConfigAndDeferralTest |
python | scikit-learn__scikit-learn | sklearn/_loss/loss.py | {
"start": 23005,
"end": 25516
} | class ____(BaseLoss):
"""Huber loss, for regression.
Domain:
y_true and y_pred all real numbers
quantile in (0, 1)
Link:
y_pred = raw_prediction
For a given sample x_i, the Huber loss is defined as::
loss(x_i) = 1/2 * abserr**2 if abserr <= delta
delta * (abserr - delta/2) if abserr > delta
abserr = |y_true_i - raw_prediction_i|
delta = quantile(abserr, self.quantile)
Note: HuberLoss(quantile=1) equals HalfSquaredError and HuberLoss(quantile=0)
equals delta * (AbsoluteError() - delta/2).
Additional Attributes
---------------------
quantile : float
The quantile level which defines the breaking point `delta` to distinguish
between absolute error and squared error. Must be in range (0, 1).
Reference
---------
.. [1] Friedman, J.H. (2001). :doi:`Greedy function approximation: A gradient
boosting machine <10.1214/aos/1013203451>`.
Annals of Statistics, 29, 1189-1232.
"""
differentiable = False
need_update_leaves_values = True
def __init__(self, sample_weight=None, quantile=0.9, delta=0.5):
check_scalar(
quantile,
"quantile",
target_type=numbers.Real,
min_val=0,
max_val=1,
include_boundaries="neither",
)
self.quantile = quantile # This is better stored outside of Cython.
super().__init__(
closs=CyHuberLoss(delta=float(delta)),
link=IdentityLink(),
)
self.approx_hessian = True
self.constant_hessian = False
def fit_intercept_only(self, y_true, sample_weight=None):
"""Compute raw_prediction of an intercept-only model.
This is the weighted median of the target, i.e. over the samples
axis=0.
"""
# See formula before algo 4 in Friedman (2001), but we apply it to y_true,
# not to the residual y_true - raw_prediction. An estimator like
# HistGradientBoostingRegressor might then call it on the residual, e.g.
# fit_intercept_only(y_true - raw_prediction).
if sample_weight is None:
median = np.percentile(y_true, 50, axis=0)
else:
median = _weighted_percentile(y_true, sample_weight, 50)
diff = y_true - median
term = np.sign(diff) * np.minimum(self.closs.delta, np.abs(diff))
return median + np.average(term, weights=sample_weight)
| HuberLoss |
python | django__django | tests/backends/models.py | {
"start": 3911,
"end": 4122
} | class ____(models.Model):
key = models.CharField(max_length=3, unique=True)
obj = models.ForeignKey("CircularA", models.SET_NULL, null=True)
def natural_key(self):
return (self.key,)
| CircularB |
python | boto__boto3 | tests/unit/resources/test_factory.py | {
"start": 24844,
"end": 27638
} | class ____(BaseTestResourceFactory):
def setUp(self):
super().setUp()
self.model = {
'has': {
'QueueObject': {
'resource': {
'type': 'Queue',
'identifiers': [{'target': 'Url', 'source': 'input'}],
}
},
'PriorityQueue': {
'resource': {
'type': 'Queue',
'identifiers': [{'target': 'Url', 'source': 'input'}],
}
},
}
}
self.defs = {
'Queue': {'identifiers': [{'name': 'Url'}]},
'Message': {
'identifiers': [
{'name': 'QueueUrl'},
{'name': 'ReceiptHandle'},
]
},
}
def test_subresource_custom_name(self):
resource = self.load('test', self.model, self.defs)()
assert hasattr(resource, 'QueueObject')
def test_contains_all_subresources(self):
resource = self.load('test', self.model, self.defs)()
assert 'QueueObject' in dir(resource)
assert 'PriorityQueue' in dir(resource)
assert 'Message' in dir(resource)
def test_get_available_subresources(self):
resource = self.load('test', self.model, self.defs)()
assert hasattr(resource, 'get_available_subresources')
subresources = sorted(resource.get_available_subresources())
expected = sorted(['PriorityQueue', 'Message', 'QueueObject'])
assert subresources == expected
def test_subresource_missing_all_subresources(self):
resource = self.load('test', self.model, self.defs)()
message = resource.Message('url', 'handle')
assert 'QueueObject' not in dir(message)
assert 'PriorityQueue' not in dir(message)
assert 'Queue' not in dir(message)
assert 'Message' not in dir(message)
def test_event_emitted_when_class_created(self):
self.load('test', self.model, self.defs)
assert self.emitter.emit.called
call_args = self.emitter.emit.call_args
# Verify the correct event name emitted.
assert (
call_args[0][0] == 'creating-resource-class.test.ServiceResource'
)
# Verify we send out the class attributes dict.
actual_class_attrs = sorted(call_args[1]['class_attributes'])
assert actual_class_attrs == [
'Message',
'PriorityQueue',
'QueueObject',
'get_available_subresources',
'meta',
]
base_classes = sorted(call_args[1]['base_classes'])
assert base_classes == [ServiceResource]
| TestServiceResourceSubresources |
python | tensorflow__tensorflow | tensorflow/lite/python/lite.py | {
"start": 107101,
"end": 112468
} | class ____(TFLiteConverterBaseV1):
"""Converts the given SavedModel into TensorFlow Lite model."""
def __init__(
self,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None,
):
"""Constructor for TFLiteConverter.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteKerasModelConverter, self).__init__(
experimental_debug_info_func=None
)
# Handles Keras when Eager mode is enabled.
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError(
"`input_arrays` and `output_arrays` are unsupported "
"with Eager mode. If your model requires any of these "
"parameters, please use disable_eager_execution()."
)
keras_model = keras_deps.get_load_model_function()(
model_file, custom_objects
)
function = _trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
concrete_func, lower_control_flow=False
)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
self._keras_model = keras_model
self._graph_def = frozen_func.graph.as_graph_def()
self._input_tensors = frozen_func.inputs
self._output_tensors = frozen_func.outputs
self._debug_info_func = _build_debug_info_func(frozen_func.graph)
return
# Handles Keras when Eager mode is disabled.
keras_deps.get_clear_session_function()()
keras_model = keras_deps.get_load_model_function()(
model_file, custom_objects
)
sess = keras_deps.get_get_session_function()()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
self._keras_model = keras_model
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess.graph)
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL)
def _freeze_keras_model(self, output_dir):
"""Save Keras model to Saved Model format.
Args:
output_dir: The output directory to save the SavedModel.
"""
try:
self._keras_model.save(output_dir, save_format="tf")
except Exception: # pylint: disable=broad-except
# When storing the given keras model to a saved model is failed, let's
# use original keras model conversion pipeline.
return None
tag_set = set([_tag_constants.SERVING])
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, input_tensors, output_tensors, sess_graph = _freeze_saved_model(
output_dir, None, None, None, tag_set, signature_key
)
self.saved_model_dir = output_dir
self._saved_model_tags = tag_set
self._saved_model_exported_names = [signature_key]
self._parse_saved_model_args()
if self.saved_model_dir:
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess_graph)
def _convert_as_saved_model(self):
"""Converts a Keras model as a saved model.
Returns:
The converted data in serialized format.
"""
temp_dir = tempfile.mkdtemp()
try:
self._freeze_keras_model(temp_dir)
if self.saved_model_dir:
return super(TFLiteKerasModelConverter, self).convert()
finally:
shutil.rmtree(temp_dir, True)
@_export_metrics
def convert(self):
"""Converts a Keras model based on instance variables.
Returns:
The converted data in serialized format, either a TFLite Flatbuffer or
a Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
return super(TFLiteKerasModelConverter, self).convert()
| TFLiteKerasModelConverter |
python | crytic__slither | slither/printers/summary/human_summary.py | {
"start": 796,
"end": 14947
} | class ____(AbstractPrinter):
ARGUMENT = "human-summary"
HELP = "Print a human-readable summary of the contracts"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary"
@staticmethod
def _get_summary_erc20(contract):
functions_name = [f.name for f in contract.functions]
state_variables = [v.name for v in contract.state_variables]
pause = "pause" in functions_name
if "mint" in functions_name:
if "mintingFinished" in state_variables:
mint_unlimited = False
else:
mint_unlimited = True
else:
mint_unlimited = None # no minting
race_condition_mitigated = (
"increaseApproval" in functions_name or "safeIncreaseAllowance" in functions_name
)
return pause, mint_unlimited, race_condition_mitigated
def get_summary_erc20(self, contract):
txt = ""
pause, mint_unlimited, race_condition_mitigated = self._get_summary_erc20(contract)
if pause:
txt += yellow("Pausable") + "\n"
if mint_unlimited is None:
txt += green("No Minting") + "\n"
else:
if mint_unlimited:
txt += red("∞ Minting") + "\n"
else:
txt += yellow("Minting") + "\n"
if not race_condition_mitigated:
txt += red("Approve Race Cond.") + "\n"
return txt
def _get_detectors_result(self) -> Tuple[List[Dict], int, int, int, int, int]:
# disable detectors logger
logger = logging.getLogger("Detectors")
logger.setLevel(logging.ERROR)
checks_optimization = self.slither.detectors_optimization
checks_informational = self.slither.detectors_informational
checks_low = self.slither.detectors_low
checks_medium = self.slither.detectors_medium
checks_high = self.slither.detectors_high
issues_optimization = [c.detect() for c in checks_optimization]
issues_optimization = [c for c in issues_optimization if c]
issues_optimization = [item for sublist in issues_optimization for item in sublist]
issues_informational = [c.detect() for c in checks_informational]
issues_informational = [c for c in issues_informational if c]
issues_informational = [item for sublist in issues_informational for item in sublist]
issues_low = [c.detect() for c in checks_low]
issues_low = [c for c in issues_low if c]
issues_low = [item for sublist in issues_low for item in sublist]
issues_medium = (c.detect() for c in checks_medium)
issues_medium = [c for c in issues_medium if c]
issues_medium = [item for sublist in issues_medium for item in sublist]
issues_high = [c.detect() for c in checks_high]
issues_high = [c for c in issues_high if c]
issues_high = [item for sublist in issues_high for item in sublist]
all_results = (
issues_optimization + issues_informational + issues_low + issues_medium + issues_high
)
return (
all_results,
len(issues_optimization),
len(issues_informational),
len(issues_low),
len(issues_medium),
len(issues_high),
)
def get_detectors_result(self) -> Tuple[str, List[Dict], int, int, int, int, int]:
(
all_results,
optimization,
informational,
low,
medium,
high,
) = self._get_detectors_result()
txt = f"Number of optimization issues: {green(optimization)}\n"
txt += f"Number of informational issues: {green(informational)}\n"
txt += f"Number of low issues: {green(low)}\n"
if medium > 0:
txt += f"Number of medium issues: {yellow(medium)}\n"
else:
txt += f"Number of medium issues: {green(medium)}\n"
if high > 0:
txt += f"Number of high issues: {red(high)}\n"
else:
txt += f"Number of high issues: {green(high)}\n\n"
return txt, all_results, optimization, informational, low, medium, high
@staticmethod
def _is_complex_code(contract):
for f in contract.functions:
if compute_cyclomatic_complexity(f) > 7:
return True
return False
def is_complex_code(self, contract):
"""
Check if the code is complex
Heuristic, the code is complex if:
- One function has a cyclomatic complexity > 7
Args:
contract
"""
is_complex = self._is_complex_code(contract)
result = red("Yes") if is_complex else green("No")
return result
@staticmethod
def _number_functions(contract):
return len(contract.functions)
def _get_number_of_assembly_lines(self) -> int:
total_asm_lines = 0
for contract in self.contracts:
for function in contract.functions_declared:
for node in function.nodes:
if node.type == NodeType.ASSEMBLY:
inline_asm = node.inline_asm
if inline_asm:
total_asm_lines += len(inline_asm.splitlines())
return total_asm_lines
def _compilation_type(self):
if self.slither.crytic_compile is None:
return "Compilation non standard\n"
return f"Compiled with {str(self.slither.crytic_compile.type)}\n"
def _number_contracts(self) -> Tuple[int, int, int]:
contracts = self.slither.contracts
deps = [c for c in contracts if c.is_from_dependency()]
tests = [c for c in contracts if c.is_test]
return len(contracts) - len(deps) - len(tests), len(deps), len(tests)
def _standard_libraries(self):
libraries = []
for contract in self.contracts:
lib = is_standard_library(contract)
if lib:
libraries.append(lib)
return libraries
def _ercs(self):
ercs = []
for contract in self.contracts:
ercs += contract.ercs()
return list(set(ercs))
def _get_features(self, contract): # pylint: disable=too-many-branches
has_payable = False
can_send_eth = False
can_selfdestruct = False
has_ecrecover = False
can_delegatecall = False
has_token_interaction = False
has_assembly = False
use_abi_encoder = False
for compilation_unit in self.slither.compilation_units:
for pragma in compilation_unit.pragma_directives:
if (
pragma.source_mapping.filename.absolute
== contract.source_mapping.filename.absolute
):
if pragma.is_abi_encoder_v2:
use_abi_encoder = True
for function in contract.functions:
if function.payable:
has_payable = True
if function.contains_assembly:
has_assembly = True
for ir in function.slithir_operations:
if isinstance(ir, (LowLevelCall, HighLevelCall, Send, Transfer)) and ir.call_value:
can_send_eth = True
if isinstance(ir, SolidityCall) and ir.function in [
SolidityFunction("suicide(address)"),
SolidityFunction("selfdestruct(address)"),
]:
can_selfdestruct = True
if isinstance(ir, SolidityCall) and ir.function == SolidityFunction(
"ecrecover(bytes32,uint8,bytes32,bytes32)"
):
has_ecrecover = True
if isinstance(ir, LowLevelCall) and ir.function_name in [
"delegatecall",
"callcode",
]:
can_delegatecall = True
if isinstance(ir, HighLevelCall):
if (
isinstance(ir.function, (Function, StateVariable))
and ir.function.contract.is_possible_token
):
has_token_interaction = True
return {
"Receive ETH": has_payable,
"Send ETH": can_send_eth,
"Selfdestruct": can_selfdestruct,
"Ecrecover": has_ecrecover,
"Delegatecall": can_delegatecall,
"Tokens interaction": has_token_interaction,
"AbiEncoderV2": use_abi_encoder,
"Assembly": has_assembly,
"Upgradeable": contract.is_upgradeable,
"Proxy": contract.is_upgradeable_proxy,
}
def _get_contracts(self, txt: str) -> str:
(
number_contracts,
number_contracts_deps,
number_contracts_tests,
) = self._number_contracts()
txt += f"Total number of contracts in source files: {number_contracts}\n"
if number_contracts_deps > 0:
txt += f"Number of contracts in dependencies: {number_contracts_deps}\n"
if number_contracts_tests > 0:
txt += f"Number of contracts in tests : {number_contracts_tests}\n"
return txt
def _get_number_lines(self, txt: str, results: Dict) -> Tuple[str, Dict]:
loc = compute_loc_metrics(self.slither)
txt += "Source lines of code (SLOC) in source files: "
txt += f"{loc.src.sloc}\n"
if loc.dep.sloc > 0:
txt += "Source lines of code (SLOC) in dependencies: "
txt += f"{loc.dep.sloc}\n"
if loc.test.sloc > 0:
txt += "Source lines of code (SLOC) in tests : "
txt += f"{loc.test.sloc}\n"
results["number_lines"] = loc.src.sloc
results["number_lines__dependencies"] = loc.dep.sloc
total_asm_lines = self._get_number_of_assembly_lines()
txt += f"Number of assembly lines: {total_asm_lines}\n"
results["number_lines_assembly"] = total_asm_lines
return txt, results
def output(self, _filename): # pylint: disable=too-many-locals,too-many-statements
"""
_filename is not used
Args:
_filename(string)
"""
txt = "\n"
txt += self._compilation_type()
results = {
"contracts": {"elements": []},
"number_lines": 0,
"number_lines_in_dependencies": 0,
"number_lines_assembly": 0,
"standard_libraries": [],
"ercs": [],
"number_findings": {},
"detectors": [],
}
txt = self._get_contracts(txt)
txt, results = self._get_number_lines(txt, results)
(
txt_detectors,
detectors_results,
optimization,
info,
low,
medium,
high,
) = self.get_detectors_result()
txt += txt_detectors
results["number_findings"] = {
"optimization_issues": optimization,
"informational_issues": info,
"low_issues": low,
"medium_issues": medium,
"high_issues": high,
}
results["detectors"] = detectors_results
libs = self._standard_libraries()
if libs:
txt += f'\nUse: {", ".join(libs)}\n'
results["standard_libraries"] = [str(lib) for lib in libs]
ercs = self._ercs()
if ercs:
txt += f'ERCs: {", ".join(ercs)}\n'
results["ercs"] = [str(e) for e in ercs]
table = MyPrettyTable(
["Name", "# functions", "ERCS", "ERC20 info", "Complex code", "Features"]
)
for contract in self.slither.contracts_derived:
if contract.is_from_dependency() or contract.is_test:
continue
is_complex = self.is_complex_code(contract)
number_functions = self._number_functions(contract)
ercs = ",".join(contract.ercs())
is_erc20 = contract.is_erc20()
erc20_info = ""
if is_erc20:
erc20_info += self.get_summary_erc20(contract)
features = "\n".join(
[name for name, to_print in self._get_features(contract).items() if to_print]
)
table.add_row(
[
contract.name,
number_functions,
ercs,
erc20_info,
is_complex,
features,
]
)
self.info(txt + "\n" + str(table))
results_contract = output.Output("")
for contract in self.slither.contracts_derived:
if contract.is_test or contract.is_from_dependency():
continue
contract_d = {
"contract_name": contract.name,
"is_complex_code": self._is_complex_code(contract),
"is_erc20": contract.is_erc20(),
"number_functions": self._number_functions(contract),
"features": [
name for name, to_print in self._get_features(contract).items() if to_print
],
}
if contract_d["is_erc20"]:
pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)
contract_d["erc20_pause"] = pause
if mint_limited is not None:
contract_d["erc20_can_mint"] = True
contract_d["erc20_mint_limited"] = mint_limited
else:
contract_d["erc20_can_mint"] = False
contract_d["erc20_race_condition_mitigated"] = race_condition_mitigated
results_contract.add_contract(contract, additional_fields=contract_d)
results["contracts"]["elements"] = results_contract.elements
json = self.generate_output(txt, additional_fields=results)
return json
| PrinterHumanSummary |
python | walkccc__LeetCode | solutions/3038. Maximum Number of Operations With the Same Score I/3038.py | {
"start": 0,
"end": 249
} | class ____:
def maxOperations(self, nums: list[int]) -> int:
ans = 1
summ = nums[0] + nums[1]
for i in range(2, len(nums) - 1, 2):
if nums[i] + nums[i + 1] == summ:
ans += 1
else:
break
return ans
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 213191,
"end": 214012
} | class ____(TypedDict, total=False):
"""
:class:`altair.Polygon` ``TypedDict`` wrapper.
Parameters
----------
coordinates
type
Specifies the type of GeoJSON object.
bbox
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
"""
coordinates: Sequence[Sequence[Sequence[float]]]
type: Literal["Polygon"]
bbox: Sequence[float]
| PolygonKwds |
python | psf__black | tests/data/cases/stub.py | {
"start": 76,
"end": 207
} | class ____:
this_lack_of_newline_should_be_kept: int
def b(self) -> None: ...
but_this_newline_should_also_be_kept: int
| B |
python | networkx__networkx | networkx/readwrite/tests/test_sparse6.py | {
"start": 115,
"end": 2180
} | class ____:
def test_from_sparse6_bytes(self):
data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"
G = nx.from_sparse6_bytes(data)
assert nodes_equal(
sorted(G.nodes()),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
)
assert edges_equal(
G.edges(),
[
(0, 1),
(0, 2),
(0, 3),
(1, 12),
(1, 14),
(2, 13),
(2, 15),
(3, 16),
(3, 17),
(4, 7),
(4, 9),
(4, 11),
(5, 6),
(5, 8),
(5, 9),
(6, 10),
(6, 11),
(7, 8),
(7, 10),
(8, 12),
(9, 15),
(10, 14),
(11, 13),
(12, 16),
(13, 17),
(14, 17),
(15, 16),
],
)
def test_from_bytes_multigraph_graph(self):
graph_data = b":An"
G = nx.from_sparse6_bytes(graph_data)
assert isinstance(G, nx.Graph)
multigraph_data = b":Ab"
M = nx.from_sparse6_bytes(multigraph_data)
assert isinstance(M, nx.MultiGraph)
def test_read_sparse6(self):
data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM"
G = nx.from_sparse6_bytes(data)
fh = BytesIO(data)
Gin = nx.read_sparse6(fh)
assert nodes_equal(G.nodes(), Gin.nodes())
assert edges_equal(G.edges(), Gin.edges())
def test_read_many_graph6(self):
# Read many graphs into list
data = b":Q___eDcdFcDeFcE`GaJ`IaHbKNbLM\n:Q___dCfDEdcEgcbEGbFIaJ`JaHN`IM"
fh = BytesIO(data)
glist = nx.read_sparse6(fh)
assert len(glist) == 2
for G in glist:
assert nodes_equal(
G.nodes(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
)
| TestSparseGraph6 |
python | pexpect__pexpect | pexpect/ANSI.py | {
"start": 4070,
"end": 4402
} | class ____ (screen.screen):
'''This class is an abstract, generic terminal.
This does nothing. This is a placeholder that
provides a common base class for other terminals
such as an ANSI terminal. '''
def __init__ (self, r=24, c=80, *args, **kwargs):
screen.screen.__init__(self, r,c,*args,**kwargs)
| term |
python | django__django | tests/custom_lookups/models.py | {
"start": 428,
"end": 514
} | class ____(models.Model):
timestamp = models.PositiveIntegerField()
| MySQLUnixTimestamp |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver14.py | {
"start": 405,
"end": 1052
} | class ____(Protocol[_X_contra, _X_co]):
def __rdivmod__(self, __other: _X_contra) -> _X_co: ...
@overload
def divmod(__x: SupportsDivMod[_X_contra, _X_co], __y: _X_contra) -> _X_co: ...
@overload
def divmod(__x: _X_contra, __y: SupportsRDivMod[_X_contra, _X_co]) -> _X_co: ...
def divmod(__x: Any, __y: Any) -> Any: ...
reveal_type(
divmod(timedelta(minutes=90), timedelta(hours=1)),
expected_text="tuple[int, timedelta]",
)
reveal_type(divmod(3, 4), expected_text="tuple[int, int]")
reveal_type(divmod(3.6, 4), expected_text="tuple[float, float]")
reveal_type(divmod(3, 4.5), expected_text="tuple[float, float]")
| SupportsRDivMod |
python | google__flatbuffers | tests/namespace_test/NamespaceA/SecondTableInA.py | {
"start": 1515,
"end": 2471
} | class ____(object):
# SecondTableInAT
def __init__(self):
self.referToC = None # type: Optional[TableInCT]
@classmethod
def InitFromBuf(cls, buf, pos):
secondTableInA = SecondTableInA()
secondTableInA.Init(buf, pos)
return cls.InitFromObj(secondTableInA)
@classmethod
def InitFromObj(cls, secondTableInA):
x = SecondTableInAT()
x._UnPack(secondTableInA)
return x
# SecondTableInAT
def _UnPack(self, secondTableInA):
if secondTableInA is None:
return
if secondTableInA.ReferToC() is not None:
self.referToC = TableInCT.InitFromObj(secondTableInA.ReferToC())
# SecondTableInAT
def Pack(self, builder):
if self.referToC is not None:
referToC = self.referToC.Pack(builder)
SecondTableInAStart(builder)
if self.referToC is not None:
SecondTableInAAddReferToC(builder, referToC)
secondTableInA = SecondTableInAEnd(builder)
return secondTableInA
| SecondTableInAT |
python | facebookresearch__faiss | tests/test_fast_scan.py | {
"start": 9881,
"end": 10578
} | class ____(TestImplems):
def build_fast_scan_index(self, index, qbs):
index2 = faiss.IndexPQFastScan(index)
index2.qbs = qbs
index2.implem = 12
return index2
def test_qbs7(self):
self.do_with_params(32, 0x223)
def test_qbs7b(self):
self.do_with_params(32, 0x133)
def test_qbs6(self):
self.do_with_params(32, 0x33)
def test_qbs6_ip(self):
self.do_with_params(32, 0x33, faiss.METRIC_INNER_PRODUCT)
def test_qbs6b(self):
# test codepath where qbs is not known at compile time
self.do_with_params(32, 0x1113)
def test_qbs6_odd_dim(self):
self.do_with_params(30, 0x33)
| TestImplem12 |
python | django__django | tests/contenttypes_tests/test_fields.py | {
"start": 3171,
"end": 4036
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.question = Question.objects.create(text="question")
cls.answer = Answer.objects.create(text="answer", question=cls.question)
def test_defer_not_clear_cached_private_relations(self):
obj = Answer.objects.defer("text").get(pk=self.answer.pk)
with self.assertNumQueries(1):
obj.question
obj.text # Accessing a deferred field.
with self.assertNumQueries(0):
obj.question
def test_only_not_clear_cached_private_relations(self):
obj = Answer.objects.only("content_type", "object_id").get(pk=self.answer.pk)
with self.assertNumQueries(1):
obj.question
obj.text # Accessing a deferred field.
with self.assertNumQueries(0):
obj.question
| DeferredGenericRelationTests |
python | sphinx-doc__sphinx | tests/roots/test-ext-autosummary-skip-member/target.py | {
"start": 0,
"end": 264
} | class ____:
"""docstring of Foo."""
def meth(self):
"""docstring of meth."""
pass
def skipmeth(self):
"""docstring of skipmeth."""
pass
def _privatemeth(self):
"""docstring of _privatemeth."""
pass
| Foo |
python | great-expectations__great_expectations | tests/metrics/test_metric.py | {
"start": 4814,
"end": 7565
} | class ____:
@pytest.mark.unit
def test_same_metric_different_args_have_different_results(self):
context = gx.get_context(mode="ephemeral")
data_source = context.data_sources.add_pandas("Pandas Data Source")
data_asset = data_source.add_dataframe_asset("DataFrame Asset")
batch_definition = data_asset.add_batch_definition_whole_dataframe(
"Whole DataFrame Batch Definition"
)
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [4, 5, 6],
}
)
batch = batch_definition.get_batch({"dataframe": df})
metrics = [
ColumnMean(column="a"),
ColumnMean(column="b"),
]
metric_result = batch.compute_metrics(metrics)
assert isinstance(metric_result, list)
assert len(metric_result) == 2
assert metric_result[0].value == 2.0
assert metric_result[0].id == metrics[0].metric_id_for_batch(batch.id)
assert metric_result[1].value == 5.0
assert metric_result[1].id == metrics[1].metric_id_for_batch(batch.id)
assert metric_result[0].id != metric_result[1].id
@pytest.mark.unit
def test_single_metric_as_list_result_is_list(self):
context = gx.get_context(mode="ephemeral")
data_source = context.data_sources.add_pandas("Pandas Data Source")
data_asset = data_source.add_dataframe_asset("DataFrame Asset")
batch_definition = data_asset.add_batch_definition_whole_dataframe(
"Whole DataFrame Batch Definition"
)
df = pd.DataFrame(
{
"a": [1, 2, 3],
}
)
batch = batch_definition.get_batch({"dataframe": df})
metrics = [
ColumnMean(column="a"),
]
metric_result = batch.compute_metrics(metrics)
assert isinstance(metric_result, list)
assert len(metric_result) == 1
assert metric_result[0].value == 2.0
@pytest.mark.unit
def test_single_metric_result_is_metric_result(self):
context = gx.get_context(mode="ephemeral")
data_source = context.data_sources.add_pandas("Pandas Data Source")
data_asset = data_source.add_dataframe_asset("DataFrame Asset")
batch_definition = data_asset.add_batch_definition_whole_dataframe(
"Whole DataFrame Batch Definition"
)
df = pd.DataFrame(
{
"a": [1, 2, 3],
}
)
batch = batch_definition.get_batch({"dataframe": df})
metric_result = batch.compute_metrics(ColumnMean(column="a"))
assert isinstance(metric_result, MetricResult)
assert metric_result.value == 2.0
| TestComputeMetric |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 764,
"end": 883
} | class ____[T: (bytes, str) = str | bytes]: ...
# This should generate an error because T1 is not a valid default.
| ClassT6 |
python | bokeh__bokeh | src/bokeh/models/graphs.py | {
"start": 3046,
"end": 3389
} | class ____(CoordinateTransform):
'''
Abstract class for coordinate transform expression obtained from ``LayoutProvider``
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
layout = Instance(LayoutProvider)
| GraphCoordinates |
python | streamlit__streamlit | lib/tests/streamlit/runtime/media_file_manager_test.py | {
"start": 15124,
"end": 17049
} | class ____(unittest.TestCase):
# The number of threads to run our tests on
NUM_THREADS = 50
def setUp(self):
super().setUp()
self.storage = MemoryMediaFileStorage("/mock/endpoint")
self.media_file_manager = MediaFileManager(self.storage)
random.seed(1337)
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session_id"),
)
def test_add_file_multiple_threads(self):
"""We can safely call `add` from multiple threads simultaneously."""
def add_file(ii: int) -> None:
coord = random_coordinates()
data = bytes(f"{ii}", "utf-8")
self.media_file_manager.add(data, "image/png", coord)
call_on_threads(add_file, num_threads=self.NUM_THREADS)
assert len(self.media_file_manager._file_metadata) == self.NUM_THREADS
@mock.patch(
"streamlit.runtime.media_file_manager._get_session_id",
MagicMock(return_value="mock_session_id"),
)
def test_clear_files_multiple_threads(self):
"""We can safely clear session refs and remove orphaned files
from multiple threads simultaneously.
"""
# Add a bunch of files
for sample in ALL_FIXTURES.values():
self.media_file_manager.add(
sample["content"], sample["mimetype"], random_coordinates()
)
assert len(ALL_FIXTURES) == len(self.media_file_manager._file_metadata)
# Remove those files from multiple threads
def remove_files(_: int) -> None:
self.media_file_manager.clear_session_refs("mock_session_id")
self.media_file_manager.remove_orphaned_files()
call_on_threads(remove_files, num_threads=self.NUM_THREADS)
# Our files should be gone!
assert len(self.media_file_manager._file_metadata) == 0
| MediaFileManagerThreadingTest |
python | readthedocs__readthedocs.org | readthedocs/filetreediff/dataclasses.py | {
"start": 618,
"end": 1644
} | class ____:
"""A list of files and the build associated with them."""
files: dict[str, FileTreeDiffManifestFile]
build: FileTreeDiffBuild
def __init__(self, build_id: int, files: list[FileTreeDiffManifestFile]):
self.build = FileTreeDiffBuild(id=build_id)
self.files = {file.path: file for file in files}
@classmethod
def from_dict(cls, data: dict) -> "FileTreeDiffManifest":
"""
Create a FileTreeManifest from a dictionary.
The dictionary should follow the same structure as the one returned by
converting the object to a dictionary using the `as_dict` method.
"""
build_id = data["build"]["id"]
files = [
FileTreeDiffManifestFile(path=path, main_content_hash=file["main_content_hash"])
for path, file in data["files"].items()
]
return cls(build_id, files)
def as_dict(self) -> dict:
"""Convert the object to a dictionary."""
return asdict(self)
| FileTreeDiffManifest |
python | getsentry__sentry-python | sentry_sdk/integrations/spark/spark_driver.py | {
"start": 6700,
"end": 9474
} | class ____(SparkListener):
def _add_breadcrumb(
self,
level, # type: str
message, # type: str
data=None, # type: Optional[dict[str, Any]]
):
# type: (...) -> None
sentry_sdk.get_isolation_scope().add_breadcrumb(
level=level, message=message, data=data
)
def onJobStart(self, jobStart): # noqa: N802,N803
# type: (Any) -> None
sentry_sdk.get_isolation_scope().clear_breadcrumbs()
message = "Job {} Started".format(jobStart.jobId())
self._add_breadcrumb(level="info", message=message)
_set_app_properties()
def onJobEnd(self, jobEnd): # noqa: N802,N803
# type: (Any) -> None
level = ""
message = ""
data = {"result": jobEnd.jobResult().toString()}
if jobEnd.jobResult().toString() == "JobSucceeded":
level = "info"
message = "Job {} Ended".format(jobEnd.jobId())
else:
level = "warning"
message = "Job {} Failed".format(jobEnd.jobId())
self._add_breadcrumb(level=level, message=message, data=data)
def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803
# type: (Any) -> None
stage_info = stageSubmitted.stageInfo()
message = "Stage {} Submitted".format(stage_info.stageId())
data = {"name": stage_info.name()}
attempt_id = _get_attempt_id(stage_info)
if attempt_id is not None:
data["attemptId"] = attempt_id
self._add_breadcrumb(level="info", message=message, data=data)
_set_app_properties()
def onStageCompleted(self, stageCompleted): # noqa: N802,N803
# type: (Any) -> None
from py4j.protocol import Py4JJavaError # type: ignore
stage_info = stageCompleted.stageInfo()
message = ""
level = ""
data = {"name": stage_info.name()}
attempt_id = _get_attempt_id(stage_info)
if attempt_id is not None:
data["attemptId"] = attempt_id
# Have to Try Except because stageInfo.failureReason() is typed with Scala Option
try:
data["reason"] = stage_info.failureReason().get()
message = "Stage {} Failed".format(stage_info.stageId())
level = "warning"
except Py4JJavaError:
message = "Stage {} Completed".format(stage_info.stageId())
level = "info"
self._add_breadcrumb(level=level, message=message, data=data)
def _get_attempt_id(stage_info):
# type: (Any) -> Optional[int]
try:
return stage_info.attemptId()
except Exception:
pass
try:
return stage_info.attemptNumber()
except Exception:
pass
return None
| SentryListener |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trace.py | {
"start": 44093,
"end": 59869
} | class ____(OrganizationEventsTraceEndpointBase):
@staticmethod
def update_children(event: TraceEvent, limit: int) -> None:
"""Updates the children of subtraces
- Generation could be incorrect from orphans where we've had to reconnect back to an orphan event that's
already been encountered
- Sorting children events by timestamp
"""
parents = [event]
iteration = 0
while parents and iteration < limit:
iteration += 1
parent = parents.pop()
parent.children.sort(key=child_sort_key)
for child in parent.children:
child.generation = parent.generation + 1 if parent.generation is not None else None
parents.append(child)
# Concurrently fetches nodestore data to construct and return a dict mapping eventid of a txn
# to the associated nodestore event.
@staticmethod
def nodestore_event_map(events: Sequence[SnubaTransaction]) -> dict[str, Event | GroupEvent]:
event_map = {}
with ThreadPoolExecutor(max_workers=20) as executor:
future_to_event = {
executor.submit(
eventstore.backend.get_event_by_id, event["project.id"], event["id"]
): event
for event in events
}
for future in as_completed(future_to_event):
event_id = future_to_event[future]["id"]
nodestore_event = future.result()
if nodestore_event is not None:
event_map[event_id] = nodestore_event
return event_map
def serialize(
self,
limit: int,
transactions: Sequence[SnubaTransaction],
errors: Sequence[SnubaError],
roots: Sequence[SnubaTransaction],
warning_extra: dict[str, str],
event_id: str | None,
detailed: bool = False,
query_source: QuerySource | None = None,
) -> SerializedTrace:
"""For the full event trace, we return the results as a graph instead of a flattened list
if event_id is passed, we prune any potential branches of the trace to make as few nodestore calls as
possible
"""
# Code past here is deprecated, but must continue to exist until sentry installs in every possible environment
# are storing span data, since that's the only way serialize_with_spans will work
event_id_to_nodestore_event = self.nodestore_event_map(transactions)
parent_map = self.construct_parent_map(transactions)
error_map = self.construct_error_map(errors)
parent_events: dict[str, TraceEvent] = {}
results_map: dict[str | None, list[TraceEvent]] = defaultdict(list)
to_check: Deque[SnubaTransaction] = deque()
snuba_params = self.get_snuba_params(self.request, self.request.organization)
# The root of the orphan tree we're currently navigating through
orphan_root: SnubaTransaction | None = None
if roots:
results_map[None] = []
for root in roots:
root_event = TraceEvent(
root, None, 0, snuba_params=snuba_params, query_source=query_source
)
parent_events[root["id"]] = root_event
results_map[None].append(root_event)
to_check.append(root)
iteration = 0
with sentry_sdk.start_span(op="building.trace", name="full trace"):
has_orphans = False
while parent_map or to_check:
if len(to_check) == 0:
has_orphans = True
# Grab any set of events from the parent map
parent_span_id, current_events = parent_map.popitem()
current_event, *siblings = current_events
# If there were any siblings put them back
if siblings:
parent_map[parent_span_id] = siblings
previous_event = parent_events[current_event["id"]] = TraceEvent(
current_event,
None,
0,
snuba_params=snuba_params,
query_source=query_source,
)
# Used to avoid removing the orphan from results entirely if we loop
orphan_root = current_event
results_map[parent_span_id].append(previous_event)
else:
current_event = to_check.popleft()
previous_event = parent_events[current_event["id"]]
# We've found the event for the trace navigator so we can remove everything in the deque
# As they're unrelated ancestors now
if event_id and current_event["id"] == event_id:
# Remove any remaining events so we don't think they're orphans
while to_check:
to_remove = to_check.popleft()
if to_remove["trace.parent_span"] in parent_map:
del parent_map[to_remove["trace.parent_span"]]
to_check = deque()
spans: NodeSpans = []
previous_event_id = previous_event.event["id"]
if previous_event_id in event_id_to_nodestore_event:
previous_event.fetched_nodestore = True
nodestore_event = event_id_to_nodestore_event[previous_event_id]
previous_event._nodestore_event = nodestore_event
spans = nodestore_event.data.get("spans", [])
# Need to include the transaction as a span as well
#
# Important that we left pad the span id with 0s because
# the span id is stored as an UInt64 and converted into
# a hex string when quering. However, the conversion does
# not ensure that the final span id is 16 chars long since
# it's a naive base 10 to base 16 conversion.
spans.append({"span_id": previous_event.event["trace.span"].rjust(16, "0")})
for child in spans:
if child["span_id"] in error_map:
previous_event.errors.extend(
[
self.serialize_error(error)
for error in error_map.pop(child["span_id"])
]
)
# We need to connect back to an existing orphan trace
if (
has_orphans
and
# The child event has already been checked
child["span_id"] in results_map
and orphan_root is not None
and
# In the case of a span loop popping the current root removes the orphan subtrace
child["span_id"] != orphan_root["trace.parent_span"]
):
orphan_subtraces = results_map.pop(child["span_id"])
for orphan_subtrace in orphan_subtraces:
orphan_subtrace.parent_event_id = previous_event.event["id"]
previous_event.children.extend(orphan_subtraces)
if child["span_id"] not in parent_map:
continue
# Avoid potential span loops by popping, so we don't traverse the same nodes twice
child_events = parent_map.pop(child["span_id"])
for child_event in child_events:
parent_events[child_event["id"]] = TraceEvent(
child_event,
current_event["id"],
(
previous_event.generation + 1
if previous_event.generation is not None
else None
),
snuba_params=snuba_params,
query_source=query_source,
)
# Add this event to its parent's children
previous_event.children.append(parent_events[child_event["id"]])
to_check.append(child_event)
# Limit iterations just to be safe
iteration += 1
if iteration > limit:
sentry_sdk.set_tag("discover.trace-view.warning", "surpassed-trace-limit")
logger.warning(
"discover.trace-view.surpassed-trace-limit",
extra=warning_extra,
)
break
# We are now left with orphan errors in the error_map,
# that we need to serialize and return with our results.
orphan_errors: list[TraceError] = []
if iteration < limit:
for errors in error_map.values():
for error in errors:
orphan_errors.append(self.serialize_error(error))
iteration += 1
if iteration > limit:
break
if iteration > limit:
break
trace_roots: list[TraceEvent] = []
orphans: list[TraceEvent] = []
for index, result in enumerate(results_map.values()):
for subtrace in result:
self.update_children(subtrace, limit)
if index > 0 or len(roots) == 0:
orphans.extend(result)
elif len(roots) > 0:
trace_roots = result
# We sort orphans and roots separately because we always want the root(s) as the first element(s)
trace_roots.sort(key=child_sort_key)
orphans.sort(key=child_sort_key)
orphan_errors = sorted(orphan_errors, key=lambda k: k["timestamp"])
if len(orphans) > 0:
sentry_sdk.set_tag("discover.trace-view.contains-orphans", "yes")
logger.warning("discover.trace-view.contains-orphans", extra=warning_extra)
serialized_transactions = []
for trace in trace_roots:
serialized_transaction = trace.full_dict(detailed)
if serialized_transaction is not None:
serialized_transactions.append(serialized_transaction)
for orphan in orphans:
serialized_orphan = orphan.full_dict(detailed)
if serialized_orphan is not None:
serialized_transactions.append(serialized_orphan)
return {
"transactions": serialized_transactions,
"orphan_errors": [orphan for orphan in orphan_errors],
}
def serialize_with_spans(
self,
limit: int,
transactions: Sequence[SnubaTransaction],
errors: Sequence[SnubaError],
roots: Sequence[SnubaTransaction],
warning_extra: dict[str, str],
event_id: str | None,
detailed: bool = False,
query_source: QuerySource | None = None,
) -> SerializedTrace:
root_traces: list[TraceEvent] = []
orphans: list[TraceEvent] = []
orphan_event_ids: set[str] = set()
orphan_errors: list[SnubaError] = []
if detailed:
raise ParseError("Cannot return a detailed response using Spans")
with sentry_sdk.start_span(op="serialize", name="create parent map"):
parent_to_children_event_map = defaultdict(list)
serialized_transactions: list[TraceEvent] = []
for transaction in transactions:
parent_id = transaction["trace.parent_transaction"]
serialized_transaction = TraceEvent(
transaction,
parent_id,
-1,
span_serialized=True,
query_source=query_source,
)
if parent_id is None:
if transaction["trace.parent_span"]:
orphans.append(serialized_transaction)
orphan_event_ids.add(serialized_transaction.event["id"])
else:
root_traces.append(serialized_transaction)
else:
parent_to_children_event_map[parent_id].append(serialized_transaction)
serialized_transactions.append(serialized_transaction)
parent_error_map = defaultdict(list)
for error in errors:
if error.get("trace.transaction") is not None:
parent_error_map[error["trace.transaction"]].append(self.serialize_error(error))
else:
orphan_errors.append(error)
with sentry_sdk.start_span(op="serialize", name="associate children"):
for trace_event in serialized_transactions:
event_id = trace_event.event["id"]
if event_id in parent_to_children_event_map:
children_events = parent_to_children_event_map.pop(event_id)
trace_event.children = sorted(children_events, key=child_sort_key)
if event_id in parent_error_map:
trace_event.errors = sorted(
parent_error_map.pop(event_id), key=lambda k: k["timestamp"]
)
with sentry_sdk.start_span(op="serialize", name="more orphans"):
visited_transactions_ids: set[str] = {
root_trace.event["id"] for root_trace in root_traces
}
for serialized_transaction in sorted(serialized_transactions, key=child_sort_key):
if serialized_transaction.event["id"] not in visited_transactions_ids:
if serialized_transaction.event["id"] not in orphan_event_ids:
orphans.append(serialized_transaction)
orphan_event_ids.add(serialized_transaction.event["id"])
visited_transactions_ids.add(serialized_transaction.event["id"])
for child in serialized_transaction.children:
visited_transactions_ids.add(child.event["id"])
with sentry_sdk.start_span(op="serialize", name="sort"):
# Sort the results so they're consistent
orphan_errors.sort(key=lambda k: k["timestamp"])
root_traces.sort(key=child_sort_key)
orphans.sort(key=child_sort_key)
visited_transactions_in_serialization: set[str] = set()
result_transactions: list[FullResponse] = []
for trace in root_traces:
if trace.event["id"] in visited_transactions_in_serialization:
continue
result_transaction = trace.full_dict(detailed, visited_transactions_in_serialization)
if result_transaction is not None:
result_transactions.append(result_transaction)
for orphan in orphans:
if orphan.event["id"] in visited_transactions_in_serialization:
continue
serialized_orphan = orphan.full_dict(detailed, visited_transactions_in_serialization)
if serialized_orphan is not None:
result_transactions.append(serialized_orphan)
with sentry_sdk.start_span(op="serialize", name="to dict"):
return {
"transactions": result_transactions,
"orphan_errors": [self.serialize_error(error) for error in orphan_errors],
}
@region_silo_endpoint
| OrganizationEventsTraceEndpoint |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 21854,
"end": 23762
} | class ____(UserComponent):
"""
A Progress bar for tracking progress of any task.
Example:
```
progress_bar = ProgressBar(
max=100,
label="Progress Bar",
value=0,
unit="%",
metadata="0.1 items/s"
)
current.card.append(
progress_bar
)
for i in range(100):
progress_bar.update(i, metadata="%s items/s" % i)
```
Parameters
----------
max : int, default 100
The maximum value of the progress bar.
label : str, optional, default None
Optional label for the progress bar.
value : int, default 0
Optional initial value of the progress bar.
unit : str, optional, default None
Optional unit for the progress bar.
metadata : str, optional, default None
Optional additional information to show on the progress bar.
"""
type = "progressBar"
REALTIME_UPDATABLE = True
def __init__(
self,
max: int = 100,
label: Optional[str] = None,
value: int = 0,
unit: Optional[str] = None,
metadata: Optional[str] = None,
):
self._label = label
self._max = max
self._value = value
self._unit = unit
self._metadata = metadata
def update(self, new_value: int, metadata: Optional[str] = None):
self._value = new_value
if metadata is not None:
self._metadata = metadata
@with_default_component_id
@render_safely
def render(self):
data = {
"type": self.type,
"id": self.component_id,
"max": self._max,
"value": self._value,
}
if self._label:
data["label"] = self._label
if self._unit:
data["unit"] = self._unit
if self._metadata:
data["details"] = self._metadata
return data
| ProgressBar |
python | getsentry__sentry | src/sentry/tasks/store.py | {
"start": 2806,
"end": 24552
} | class ____:
has_attachments: bool = False
from_reprocessing: bool = False
def submit_save_event(
task_kind: SaveEventTaskKind,
project_id: int,
cache_key: str | None,
event_id: str | None,
start_time: float | None,
data: MutableMapping[str, Any] | None,
) -> None:
if cache_key:
data = None
# XXX: honor from_reprocessing
if task_kind.has_attachments:
task = save_event_attachments
else:
task = save_event
task_kwargs = {
"cache_key": cache_key,
"data": data,
"start_time": start_time,
"event_id": event_id,
"project_id": project_id,
}
task.delay(**task_kwargs) # type: ignore[arg-type]
def _do_preprocess_event(
cache_key: str,
data: MutableMapping[str, Any] | None,
start_time: float | None,
event_id: str | None,
from_reprocessing: bool,
project: Project | None,
has_attachments: bool = False,
) -> None:
from sentry.stacktraces.processing import find_stacktraces_in_data
from sentry.tasks.symbolication import (
get_symbolication_function_for_platform,
get_symbolication_platforms,
submit_symbolicate,
)
if cache_key and data is None:
data = processing.event_processing_store.get(cache_key)
if data is None:
metrics.incr("events.failed", tags={"reason": "cache", "stage": "pre"}, skip_internal=False)
error_logger.error("preprocess.failed.empty", extra={"cache_key": cache_key})
return
track_event_since_received(
step="start_preprocess_event",
event_data=data,
)
original_data = data
project_id = data["project"]
set_current_event_project(project_id)
if project is None:
project = Project.objects.get_from_cache(id=project_id)
else:
assert project.id == project_id, (project.id, project_id)
project.set_cached_field_value(
"organization", Organization.objects.get_from_cache(id=project.organization_id)
)
# Get the list of platforms for which we want to use Symbolicator.
# Possible values are `js`, `jvm`, and `native`.
# The event will be submitted to Symbolicator for all returned platforms,
# one after the other, so we handle mixed stacktraces.
stacktraces = find_stacktraces_in_data(data)
symbolicate_platforms = get_symbolication_platforms(data, stacktraces)
metrics.incr(
"events.to-symbolicate",
tags={platform.value: True for platform in symbolicate_platforms},
skip_internal=False,
)
should_symbolicate = len(symbolicate_platforms) > 0
if should_symbolicate:
first_platform = symbolicate_platforms.pop(0)
symbolication_function = get_symbolication_function_for_platform(
first_platform, data, stacktraces
)
symbolication_function_name = getattr(symbolication_function, "__name__", "none")
if not killswitch_matches_context(
"store.load-shed-symbolicate-event-projects",
{
"project_id": project_id,
"event_id": event_id,
"platform": data.get("platform") or "null",
"symbolication_function": symbolication_function_name,
},
):
reprocessing2.backup_unprocessed_event(data=original_data)
submit_symbolicate(
SymbolicatorTaskKind(
platform=first_platform,
is_reprocessing=from_reprocessing,
),
cache_key=cache_key,
event_id=event_id,
start_time=start_time,
has_attachments=has_attachments,
symbolicate_platforms=symbolicate_platforms,
)
return
# else: go directly to process, do not go through the symbolicate queue, do not collect 200
# NOTE: Events considered for symbolication always go through `do_process_event`
if should_symbolicate or should_process(data):
submit_process(
from_reprocessing=from_reprocessing,
cache_key=cache_key,
event_id=event_id,
start_time=start_time,
data_has_changed=False,
has_attachments=has_attachments,
)
return
submit_save_event(
SaveEventTaskKind(
has_attachments=has_attachments,
from_reprocessing=from_reprocessing,
),
project_id=project_id,
cache_key=cache_key,
event_id=event_id,
start_time=start_time,
data=original_data,
)
def preprocess_event(
cache_key: str,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project: Project | None = None,
has_attachments: bool = False,
**kwargs: Any,
) -> None:
return _do_preprocess_event(
cache_key=cache_key,
data=data,
start_time=start_time,
event_id=event_id,
from_reprocessing=False,
project=project,
has_attachments=has_attachments,
)
def preprocess_event_from_reprocessing(
cache_key: str,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project: Project | None = None,
**kwargs: Any,
) -> None:
return _do_preprocess_event(
cache_key=cache_key,
data=data,
start_time=start_time,
event_id=event_id,
from_reprocessing=True,
project=project,
)
def is_process_disabled(project_id: int, event_id: str, platform: str) -> bool:
if killswitch_matches_context(
"store.load-shed-process-event-projects",
{
"project_id": project_id,
"event_id": event_id,
"platform": platform,
},
):
return True
process_project_rollout = options.get("store.load-shed-process-event-projects-gradual")
rollout_rate = process_project_rollout.get(project_id)
if not rollout_rate:
return False
return random.random() < rollout_rate
@sentry_sdk.tracing.trace
def normalize_event(data: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
normalizer = StoreNormalizer(
remove_other=False,
is_renormalize=True,
json_dumps=orjson.dumps,
**DEFAULT_STORE_NORMALIZER_ARGS,
)
return normalizer.normalize_event(dict(data), json_loads=orjson.loads)
def do_process_event(
cache_key: str,
start_time: float | None,
event_id: str | None,
from_reprocessing: bool,
data: MutableMapping[str, Any] | None = None,
data_has_changed: bool = False,
from_symbolicate: bool = False,
has_attachments: bool = False,
) -> None:
from sentry.plugins.base import plugins
if data is None:
data = processing.event_processing_store.get(cache_key)
if data is None:
metrics.incr(
"events.failed", tags={"reason": "cache", "stage": "process"}, skip_internal=False
)
error_logger.error("process.failed.empty", extra={"cache_key": cache_key})
return
track_event_since_received(
step="start_process_event",
event_data=data,
)
project_id = data["project"]
set_current_event_project(project_id)
data_event_id = data["event_id"]
def _continue_to_save_event() -> None:
task_kind = SaveEventTaskKind(
from_reprocessing=from_reprocessing,
has_attachments=has_attachments,
)
submit_save_event(
task_kind,
project_id=project_id,
cache_key=cache_key,
event_id=data_event_id,
start_time=start_time,
data=data,
)
if is_process_disabled(project_id, data_event_id, data.get("platform") or "null"):
return _continue_to_save_event()
# NOTE: This span ranges in the 1-2ms range.
with sentry_sdk.start_span(op="tasks.store.process_event.get_project_from_cache"):
project = Project.objects.get_from_cache(id=project_id)
project.set_cached_field_value(
"organization", Organization.objects.get_from_cache(id=project.organization_id)
)
has_changed = data_has_changed
# Stacktrace based event processors.
new_data = process_stacktraces(data)
if new_data is not None:
has_changed = True
data = new_data
attachments = data.get("_attachments", None)
# Second round of datascrubbing after stacktrace and language-specific
# processing. First round happened as part of ingest.
#
# *Right now* the only sensitive data that is added in stacktrace
# processing are usernames in filepaths, so we run directly after
# stacktrace processors.
#
# We do not yet want to deal with context data produced by plugins like
# sessionstack or fullstory (which are in `get_event_preprocessors`), as
# this data is very unlikely to be sensitive data. This is why scrubbing
# happens somewhere in the middle of the pipeline.
#
# On the other hand, Javascript event error translation is happening after
# this block because it uses `get_event_preprocessors`.
#
# We are fairly confident, however, that this should run *before*
# re-normalization as it is hard to find sensitive data in partially
# trimmed strings.
if has_changed:
new_data = safe_execute(scrub_data, project=project, event=data)
# XXX(markus): When datascrubbing is finally "totally stable", we might want
# to drop the event if it crashes to avoid saving PII
if new_data is not None:
data = new_data
# TODO(dcramer): ideally we would know if data changed by default
# Default event processors.
for plugin in plugins.all(version=2):
with sentry_sdk.start_span(op="task.store.process_event.preprocessors") as span:
span.set_data("plugin", plugin.slug)
span.set_data("from_symbolicate", from_symbolicate)
processors = safe_execute(plugin.get_event_preprocessors, data=data)
for processor in processors or ():
try:
result = processor(data)
except Exception:
error_logger.exception("tasks.store.preprocessors.error")
data.setdefault("_metrics", {})["flag.processing.error"] = True
has_changed = True
else:
if result:
data = result
has_changed = True
assert data["project"] == project_id, "Project cannot be mutated by plugins"
# We cannot persist canonical types in the cache, so we need to
# downgrade this.
if not isinstance(data, dict):
data = dict(data.items())
if has_changed:
# Run some of normalization again such that we don't:
# - persist e.g. incredibly large stacktraces from minidumps
# - store event timestamps that are older than our retention window
# (also happening with minidumps)
data = normalize_event(data)
if attachments:
data["_attachments"] = attachments
cache_key = processing.event_processing_store.store(data)
return _continue_to_save_event()
@instrumented_task(
name="sentry.tasks.store.process_event",
namespace=ingest_errors_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
def process_event(
cache_key: str,
start_time: float | None = None,
event_id: str | None = None,
data_has_changed: bool = False,
from_symbolicate: bool = False,
has_attachments: bool = False,
**kwargs: Any,
) -> None:
"""
Handles event processing (for those events that need it)
This excludes symbolication via symbolicator service (see symbolicate_event).
:param string cache_key: the cache key for the event data
:param int start_time: the timestamp when the event was ingested
:param string event_id: the event identifier
:param boolean data_has_changed: set to True if the event data was changed in previous tasks
"""
return do_process_event(
cache_key=cache_key,
start_time=start_time,
event_id=event_id,
from_reprocessing=False,
data_has_changed=data_has_changed,
from_symbolicate=from_symbolicate,
has_attachments=has_attachments,
)
@instrumented_task(
name="sentry.tasks.store.process_event_from_reprocessing",
namespace=issues_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
def process_event_from_reprocessing(
cache_key: str,
start_time: float | None = None,
event_id: str | None = None,
data_has_changed: bool = False,
from_symbolicate: bool = False,
has_attachments: bool = False,
**kwargs: Any,
) -> None:
return do_process_event(
cache_key=cache_key,
start_time=start_time,
event_id=event_id,
from_reprocessing=True,
data_has_changed=data_has_changed,
from_symbolicate=from_symbolicate,
has_attachments=has_attachments,
)
def _do_save_event(
cache_key: str | None = None,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project_id: int | None = None,
has_attachments: bool = False,
consumer_type: str | None = None,
**kwargs: Any,
) -> None:
"""
Saves an event to the database.
"""
set_current_event_project(project_id)
from sentry.event_manager import EventManager, resolve_project
from sentry.exceptions import HashDiscarded
event_type = "none"
if consumer_type and consumer_type == ConsumerType.Transactions:
processing_store = processing.transaction_processing_store
else:
processing_store = processing.event_processing_store
if cache_key and data is None:
data = processing_store.get(cache_key)
if data is not None:
event_type = data.get("type") or "none"
track_event_since_received(
step="start_save_event",
event_data=data,
)
with metrics.global_tags(tags={"event_type": event_type}):
if event_id is None and data is not None:
event_id = data["event_id"]
# only when we come from reprocessing we get a project_id sent into
# the task.
if project_id is None:
assert data is not None
project_id = data.pop("project")
set_current_event_project(project_id)
# This covers two cases: where data is None because we did not manage
# to fetch it from the default cache or the empty dictionary was
# stored in the default cache. The former happens if the event
# expired while being on the queue, the second happens on reprocessing
# if the raw event was deleted concurrently while we held on to
# it. This causes the node store to delete the data and we end up
# fetching an empty dict. We could in theory not invoke `save_event`
# in those cases but it's important that we always clean up the
# reprocessing reports correctly or they will screw up the UI. So
# to future proof this correctly we just handle this case here.
if not data:
metrics.incr(
"events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
)
return
all_attachments = []
attachments = []
project = None
try:
if cache_key and has_attachments:
all_attachments = list(get_attachments_for_event(data))
# we won’t be needing the transient attachments after this anymore
data.pop("_attachments", None)
attachments = [a for a in all_attachments if not a.rate_limited]
project = resolve_project(project_id)
if killswitch_matches_context(
"store.load-shed-save-event-projects",
{
"project_id": project_id,
"event_type": event_type,
"platform": data.get("platform") or "none",
},
):
raise HashDiscarded("Load shedding save_event")
manager = EventManager(data)
# event.project.organization is populated after this statement.
manager.save(
project=project,
assume_normalized=True,
start_time=start_time,
cache_key=cache_key,
attachments=attachments,
)
# Put the updated event back into the cache so that post_process
# has the most recent data.
# We don't need to update the event in the processing_store for transaction events
# because they're not used in post_process.
if consumer_type != ConsumerType.Transactions:
data = manager.get_data()
if not isinstance(data, dict):
data = dict(data.items())
processing_store.store(data)
except HashDiscarded:
# Delete the event payload from cache since it won't show up in post-processing.
if cache_key:
processing_store.delete_by_key(cache_key)
# Mark all the attachments as `rate_limited`, so they are being properly cleaned up in the `finally` block:
for attachment in all_attachments:
attachment.rate_limited = True
except Exception:
metrics.incr("events.save_event.exception", tags={"event_type": event_type})
raise
finally:
if consumer_type == ConsumerType.Transactions and event_id:
# we won't use the transaction data in post_process
# so we can delete it from the cache now.
if cache_key:
processing_store.delete_by_key(cache_key)
track_sampled_event(
data["event_id"],
ConsumerType.Transactions,
TransactionStageStatus.REDIS_DELETED,
)
reprocessing2.mark_event_reprocessed(data)
if all_attachments and project:
delete_cached_and_ratelimited_attachments(project, all_attachments)
if start_time:
metrics.timing(
"events.time-to-process",
time() - start_time,
instance=data["platform"],
tags={
"is_reprocessing2": (
"true" if reprocessing2.is_reprocessed_event(data) else "false"
),
},
)
track_event_since_received(
step="end_save_event",
event_data=data,
)
@instrumented_task(
name="sentry.tasks.store.save_event",
namespace=ingest_errors_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
def save_event(
cache_key: str | None = None,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project_id: int | None = None,
**kwargs: Any,
) -> None:
_do_save_event(
cache_key,
data,
start_time,
event_id,
project_id,
consumer_type=ConsumerType.Events,
**kwargs,
)
@instrumented_task(
name="sentry.tasks.store.save_event_transaction",
namespace=ingest_transactions_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
def save_event_transaction(
cache_key: str | None = None,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project_id: int | None = None,
**kwargs: Any,
) -> None:
if event_id:
track_sampled_event(
event_id, ConsumerType.Transactions, TransactionStageStatus.SAVE_TXN_STARTED
)
_do_save_event(
cache_key,
data,
start_time,
event_id,
project_id,
consumer_type=ConsumerType.Transactions,
**kwargs,
)
if event_id:
track_sampled_event(
event_id, ConsumerType.Transactions, TransactionStageStatus.SAVE_TXN_FINISHED
)
@instrumented_task(
name="sentry.tasks.store.save_event_feedback",
namespace=issues_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
@metrics.wraps("feedback_consumer.save_event_feedback_task")
def save_event_feedback(
cache_key: str | None = None,
start_time: float | None = None,
event_id: str | None = None,
*,
data: Mapping[str, Any],
project_id: int,
**kwargs: Any,
) -> None:
save_event_feedback_impl(data, project_id)
@instrumented_task(
name="sentry.tasks.store.save_event_attachments",
namespace=ingest_attachments_tasks,
processing_deadline_duration=65,
silo_mode=SiloMode.REGION,
)
def save_event_attachments(
cache_key: str | None = None,
data: MutableMapping[str, Any] | None = None,
start_time: float | None = None,
event_id: str | None = None,
project_id: int | None = None,
**kwargs: Any,
) -> None:
_do_save_event(
cache_key,
data,
start_time,
event_id,
project_id,
consumer_type=ConsumerType.Attachments,
has_attachments=True,
**kwargs,
)
| SaveEventTaskKind |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_management_commands.py | {
"start": 1282,
"end": 12361
} | class ____(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
def setUp(self):
super().setUp()
self.solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS["solr"]["URL"])
# Stow.
self.old_ui = connections["solr"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SolrMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["solr"]._index = self.ui
def tearDown(self):
connections["solr"]._index = self.old_ui
super().tearDown()
def verify_indexed_documents(self):
"""Confirm that the documents in the search index match the database"""
res = self.solr.search("*:*", fl=["id"], rows=50)
self.assertEqual(res.hits, 23)
indexed_doc_ids = set(i["id"] for i in res.docs)
expected_doc_ids = set(
"core.mockmodel.%d" % i
for i in MockModel.objects.values_list("pk", flat=True)
)
self.assertSetEqual(indexed_doc_ids, expected_doc_ids)
def test_basic_commands(self):
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("update_index", verbosity=0, commit=False)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("update_index", verbosity=0)
self.verify_indexed_documents()
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("rebuild_index", interactive=False, verbosity=0, commit=False)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("rebuild_index", interactive=False, verbosity=0, commit=True)
self.verify_indexed_documents()
call_command("clear_index", interactive=False, verbosity=0, commit=False)
self.verify_indexed_documents()
def test_remove(self):
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("update_index", verbosity=0)
self.verify_indexed_documents()
# Remove several instances, two of which will fit in the same block:
MockModel.objects.get(pk=1).delete()
MockModel.objects.get(pk=2).delete()
MockModel.objects.get(pk=8).delete()
self.assertEqual(self.solr.search("*:*").hits, 23)
# Plain ``update_index`` doesn't fix it.
call_command("update_index", verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 23)
# Remove without commit also doesn't affect queries:
call_command(
"update_index", remove=True, verbosity=0, batchsize=2, commit=False
)
self.assertEqual(self.solr.search("*:*").hits, 23)
# … but remove with commit does:
call_command("update_index", remove=True, verbosity=0, batchsize=2)
self.assertEqual(self.solr.search("*:*").hits, 20)
def test_age(self):
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
start = datetime.datetime.now() - datetime.timedelta(hours=3)
end = datetime.datetime.now()
mock = MockModel.objects.get(pk=1)
mock.pub_date = datetime.datetime.now() - datetime.timedelta(hours=2)
mock.save()
self.assertEqual(
MockModel.objects.filter(pub_date__range=(start, end)).count(), 1
)
call_command("update_index", age=3, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 1)
def test_age_with_time_zones(self):
"""Haystack should use django.utils.timezone.now"""
from django.utils.timezone import now as django_now
from haystack.management.commands.update_index import now as haystack_now
self.assertIs(
haystack_now,
django_now,
msg="update_index should use django.utils.timezone.now",
)
with patch("haystack.management.commands.update_index.now") as m:
m.return_value = django_now()
self.test_age()
assert m.called
def test_dates(self):
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
start = datetime.datetime.now() - datetime.timedelta(hours=5, minutes=30)
end = datetime.datetime.now() - datetime.timedelta(hours=2)
mock_1 = MockModel.objects.get(pk=1)
mock_1.pub_date = datetime.datetime.now() - datetime.timedelta(
hours=5, minutes=1
)
mock_1.save()
mock_2 = MockModel.objects.get(pk=2)
mock_2.pub_date = datetime.datetime.now() - datetime.timedelta(hours=3)
mock_2.save()
mock_3 = MockModel.objects.get(pk=3)
mock_3.pub_date = datetime.datetime.now() - datetime.timedelta(hours=1)
mock_3.save()
self.assertEqual(
MockModel.objects.filter(pub_date__range=(start, end)).count(), 2
)
call_command(
"update_index",
start_date=start.isoformat(),
end_date=end.isoformat(),
verbosity=0,
)
self.assertEqual(self.solr.search("*:*").hits, 2)
def test_multiprocessing(self):
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("update_index", verbosity=2, workers=2, batchsize=5)
self.verify_indexed_documents()
call_command("clear_index", interactive=False, verbosity=0)
self.assertEqual(self.solr.search("*:*").hits, 0)
call_command("update_index", verbosity=2, workers=2, batchsize=5, commit=False)
self.assertEqual(self.solr.search("*:*").hits, 0)
def test_build_schema_wrong_backend(self):
settings.HAYSTACK_CONNECTIONS["whoosh"] = {
"ENGINE": "haystack.backends.whoosh_backend.WhooshEngine",
"PATH": mkdtemp(prefix="dummy-path-"),
}
connections["whoosh"]._index = self.ui
self.assertRaises(
ImproperlyConfigured, call_command, "build_solr_schema", using="whoosh"
)
def test_build_schema(self):
# Stow.
oldhdf = constants.DOCUMENT_FIELD
oldui = connections["solr"].get_unified_index()
oldurl = settings.HAYSTACK_CONNECTIONS["solr"]["URL"]
conf_dir = tempfile.mkdtemp()
with open(os.path.join(conf_dir, "managed-schema"), "w+") as fp:
pass
try:
needle = "Th3S3cr3tK3y"
constants.DOCUMENT_FIELD = (
needle # Force index to use new key for document_fields
)
settings.HAYSTACK_CONNECTIONS["solr"]["URL"] = (
settings.HAYSTACK_CONNECTIONS["solr"]["URL"].rsplit("/", 1)[0]
+ "/mgmnt"
)
ui = UnifiedIndex()
ui.build(indexes=[SolrMockSecretKeySearchIndex()])
connections["solr"]._index = ui
rendered_file = StringIO()
schema_file = os.path.join(conf_dir, "schema.xml")
solrconfig_file = os.path.join(conf_dir, "solrconfig.xml")
self.assertTrue(
os.path.isdir(conf_dir), msg="Expected %s to be a directory" % conf_dir
)
call_command("build_solr_schema", using="solr", stdout=rendered_file)
contents = rendered_file.getvalue()
self.assertGreater(contents.find('name="%s' % needle), -1)
call_command(
"build_solr_schema", using="solr", configure_directory=conf_dir
)
with open(schema_file) as s:
self.assertGreater(s.read().find('name="%s' % needle), -1)
with open(solrconfig_file) as s:
self.assertGreater(s.read().find('name="df">%s' % needle), -1)
self.assertTrue(
os.path.isfile(os.path.join(conf_dir, "managed-schema.old"))
)
with patch(
"haystack.management.commands.build_solr_schema.requests.get"
) as mock_request:
call_command("build_solr_schema", using="solr", reload_core=True)
with patch(
"haystack.management.commands.build_solr_schema.requests.get"
) as mock_request:
mock_request.return_value.ok = False
self.assertRaises(
CommandError,
call_command,
"build_solr_schema",
using="solr",
reload_core=True,
)
call_command("build_solr_schema", using="solr", filename=schema_file)
with open(schema_file) as s:
self.assertGreater(s.read().find('name="%s' % needle), -1)
finally:
# reset
constants.DOCUMENT_FIELD = oldhdf
connections["solr"]._index = oldui
settings.HAYSTACK_CONNECTIONS["solr"]["URL"] = oldurl
shutil.rmtree(conf_dir, ignore_errors=True)
def test_build_solr_schema_reload_core_without_trailing_slash(self):
"""Ensure `build_solr_schema` works when the Solr core URL does not have a trailing slash."""
# Get the current Solr URL from settings
current_url = settings.HAYSTACK_CONNECTIONS["solr"]["URL"]
# Remove trailing slash if present
updated_url = (
current_url.rstrip("/") if current_url.endswith("/") else current_url
)
# Patch only the `URL` key inside `settings.HAYSTACK_CONNECTIONS["solr"]`
with patch.dict(settings.HAYSTACK_CONNECTIONS["solr"], {"URL": updated_url}):
out = StringIO() # Capture output
call_command(
"build_solr_schema", using="solr", reload_core=True, stdout=out
)
output = out.getvalue()
self.assertIn(
"Trying to reload core named", output
) # Verify core reload message
def test_build_solr_schema_reload_core_with_trailing_slash(self):
"""Ensure `build_solr_schema` works when the Solr core URL has a trailing slash."""
# Get the current Solr URL from settings
current_url = settings.HAYSTACK_CONNECTIONS["solr"]["URL"]
# Add a trailing slash if not present
updated_url = current_url if current_url.endswith("/") else current_url + "/"
# Patch only the `URL` key inside `settings.HAYSTACK_CONNECTIONS["solr"]`
with patch.dict(settings.HAYSTACK_CONNECTIONS["solr"], {"URL": updated_url}):
out = StringIO() # Capture output
call_command(
"build_solr_schema", using="solr", reload_core=True, stdout=out
)
output = out.getvalue()
self.assertIn(
"Trying to reload core named", output
) # Verify core reload message
| ManagementCommandTestCase |
python | django__django | tests/admin_inlines/models.py | {
"start": 8731,
"end": 8895
} | class ____(models.Model):
person = models.ManyToManyField(Person, verbose_name="attendant")
course = models.ForeignKey(Course, on_delete=models.CASCADE)
| Class |
python | doocs__leetcode | lcof/面试题38. 字符串的排列/Solution.py | {
"start": 0,
"end": 514
} | class ____:
def permutation(self, s: str) -> List[str]:
def dfs(i):
if i == len(s) - 1:
ans.append(''.join(cs))
return
vis = set()
for j in range(i, len(s)):
if cs[j] not in vis:
vis.add(cs[j])
cs[i], cs[j] = cs[j], cs[i]
dfs(i + 1)
cs[i], cs[j] = cs[j], cs[i]
ans = []
cs = list(s)
dfs(0)
return ans
| Solution |
python | pytorch__pytorch | torch/amp/grad_scaler.py | {
"start": 1536,
"end": 30643
} | class ____:
"""An instance ``scaler`` of :class:`GradScaler`.
Helps perform the steps of gradient scaling
conveniently.
* ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
* ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
* ``scaler.update()`` updates ``scaler``'s scale factor.
Example::
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales gradients of the optimizer's params.
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage
(along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty,
and multiple losses/optimizers.
``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if
the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
without incurring inf or NaN gradient values.
``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
``growth_factor``.
The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
Args:
device (str, optional, default="cuda"): Device type to use. Possible values are: 'cuda' and 'cpu'.
The type is the same as the `type` attribute of a :class:`torch.device`.
Thus, you may obtain the device type of a tensor using `Tensor.device.type`.
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
Default: ``True``
"""
def __init__(
self,
device: str = "cuda",
init_scale: float = 2.0**16,
growth_factor: float = 2.0,
backoff_factor: float = 0.5,
growth_interval: int = 2000,
enabled: bool = True,
) -> None:
self._device = device
self._enabled = enabled
if self._device == "cuda":
if enabled and torch.cuda.amp.common.amp_definitely_not_available():
warnings.warn(
"torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.",
stacklevel=2,
)
self._enabled = False
if self._enabled:
assert growth_factor > 1.0, "The growth factor must be > 1.0."
assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
self._init_scale = init_scale
# self._scale will be lazily initialized during the first call to scale()
self._scale: Optional[torch.Tensor] = None
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._init_growth_tracker = 0
# self._growth_tracker will be lazily initialized during the first call to scale()
self._growth_tracker: Optional[torch.Tensor] = None
self._per_optimizer_states: dict[int, dict[str, Any]] = defaultdict(
_refresh_per_optimizer_state
)
def _check_scale_growth_tracker(
self, funcname: str
) -> tuple[torch.Tensor, torch.Tensor]:
fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
assert self._scale is not None, (
f"Attempted {funcname} but _scale is None. " + fix
)
assert self._growth_tracker is not None, (
f"Attempted {funcname} but _growth_tracker is None. " + fix
)
return (self._scale, self._growth_tracker)
def _lazy_init_scale_growth_tracker(self, dev: torch.device) -> None:
assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
self._scale = torch.full((), self._init_scale, dtype=torch.float32, device=dev)
self._growth_tracker = torch.full(
(), self._init_growth_tracker, dtype=torch.int32, device=dev
)
@overload
def scale(self, outputs: torch.Tensor) -> torch.Tensor: ...
@overload
def scale(self, outputs: list[torch.Tensor]) -> list[torch.Tensor]: ...
@overload
def scale(self, outputs: tuple[torch.Tensor, ...]) -> tuple[torch.Tensor, ...]: ...
@overload
def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: ...
def scale(
self,
outputs: Union[torch.Tensor, Iterable[torch.Tensor]],
) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Args:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: list[
_MultiDeviceReplicator
] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
if isinstance(val, torch.Tensor):
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_MultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
if isinstance(val, abc.Iterable):
iterable = map(apply_scale, val)
if isinstance(val, (list, tuple)):
return type(val)(iterable)
return iterable
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
def _unscale_grads_(
self,
optimizer: torch.optim.Optimizer,
inv_scale: torch.Tensor,
found_inf: torch.Tensor,
allow_fp16: bool,
) -> dict[torch.device, torch.Tensor]:
per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
per_device_found_inf = _MultiDeviceReplicator(found_inf)
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
# There could be hundreds of grads, so we'd like to iterate through them just once.
# However, we don't know their devices or dtypes in advance.
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
# Google says mypy struggles with defaultdicts type annotations.
per_device_and_dtype_grads: dict[
torch.device, dict[torch.dtype, list[torch.Tensor]]
] = defaultdict(lambda: defaultdict(list))
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
assert isinstance(param, torch.Tensor)
if param.grad is None:
continue
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
if param.grad.is_sparse:
# is_coalesced() == False means the sparse grad has values with duplicate indices.
# coalesce() deduplicates indices and adds all values that have the same index.
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
# so we should check the coalesced _values().
if param.grad.dtype is torch.float16:
param.grad = param.grad.coalesce()
to_unscale = param.grad._values()
else:
to_unscale = param.grad
# TODO: is there a way to split by device and dtype without appending in the inner loop?
per_device_and_dtype_grads[to_unscale.device][
to_unscale.dtype
].append(to_unscale)
for device, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._amp_foreach_non_finite_check_and_unscale_(
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
"""
Divides ("unscales") the optimizer's gradient tensors by the scale factor.
:meth:`unscale_` is optional, serving cases where you need to
:ref:`modify or inspect gradients<working-with-unscaled-gradients>`
between the backward pass(es) and :meth:`step`.
If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
...
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
scaler.step(optimizer)
scaler.update()
Args:
optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
.. note::
:meth:`unscale_` does not incur a CPU-GPU sync.
.. warning::
:meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
and only after all gradients for that optimizer's assigned parameters have been accumulated.
Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
.. warning::
:meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.UNSCALED:
raise RuntimeError(
"unscale_() has already been called on this optimizer since the last update()."
)
elif optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
assert self._scale is not None
inv_scale = (
self._scale.double().reciprocal().float()
if self._scale.device != torch.device("mps:0")
else self._scale.reciprocal()
)
found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(
optimizer, inv_scale, found_inf, False
)
optimizer_state["stage"] = OptState.UNSCALED
def _maybe_opt_step(
    self,
    optimizer: torch.optim.Optimizer,
    optimizer_state: dict[str, Any],
    *args: Any,
    **kwargs: Any,
) -> Optional[float]:
    """Run ``optimizer.step`` only when no inf/NaN was recorded on any device.

    Returns whatever ``optimizer.step(*args, **kwargs)`` returns, or ``None``
    when the step is skipped because overflowed gradients were detected.
    """
    # Each entry of found_inf_per_device is a one-element tensor; a nonzero
    # total means at least one device saw an inf/NaN gradient.
    per_device_flags = optimizer_state["found_inf_per_device"].values()
    total_found = sum(flag.item() for flag in per_device_flags)
    if total_found:
        return None
    return optimizer.step(*args, **kwargs)
def step(
    self, optimizer: torch.optim.Optimizer, *args: Any, **kwargs: Any
) -> Optional[float]:
    """Invoke ``unscale_(optimizer)`` followed by parameter update, if gradients are not infs/NaN.

    :meth:`step` carries out the following two operations:

    1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
       earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
    2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
       gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.

    ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.

    Returns the return value of ``optimizer.step(*args, **kwargs)``.

    Args:
        optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
        args: Any arguments.
        kwargs: Any keyword arguments.

    .. warning::
        Closure use is not currently supported.
    """
    # Scaling disabled: behave as a transparent wrapper around the optimizer.
    if not self._enabled:
        return optimizer.step(*args, **kwargs)

    if "closure" in kwargs:
        raise RuntimeError(
            "Closure use is not currently supported if GradScaler is enabled."
        )

    self._check_scale_growth_tracker("step")

    # Per-optimizer bookkeeping: tracks whether unscale_/step already ran
    # this iteration and the per-device inf/NaN flags.
    optimizer_state = self._per_optimizer_states[id(optimizer)]

    if optimizer_state["stage"] is OptState.STEPPED:
        raise RuntimeError(
            "step() has already been called since the last update()."
        )

    retval: Optional[float] = None

    if getattr(optimizer, "_step_supports_amp_scaling", False):
        # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
        # The contract with custom optimizers is that their step() should accept an additional,
        # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
        # it can query its own state, invoke unscale_ on itself, etc
        # The contract above is being deprecated to avoid introducing `grad_scaler: GradScaler` argument
        # to `Optimizer.step`. The new behavior is going to add two Tensor attributes of `grad_scale`
        # and `found_inf` to the passed optimizer so that the optimizer can utilize those
        # to skip the parameter updates or unscale gradients before updating parameters in
        # the fused kernel, e.g. `FusedAdamMathFunctor`.
        # In this behavior, `GradScaler._check_inf_per_device` is called if `OptState.READY`,
        # while the method is expected to be called by users side, i.e. their optimizers.
        kwargs_ = kwargs
        has_grad_scaler_kwarg = (
            "grad_scaler" in inspect.signature(optimizer.step).parameters
        )
        if has_grad_scaler_kwarg:
            # Deprecated path: the optimizer still takes the scaler itself.
            warnings.warn(
                "GradScaler is going to stop passing itself as a keyword argument to the passed "
                "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and "
                "`found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.",
                FutureWarning,
                stacklevel=2,
            )
            kwargs_.update({"grad_scaler": self})
        else:
            # New path: hand the optimizer `grad_scale` and `found_inf`
            # tensors so its fused kernel can unscale / skip internally.
            if optimizer_state["stage"] is OptState.READY:
                self._check_inf_per_device(optimizer)
            scaler = self._get_scale_async()
            assert scaler is not None
            # Sum the per-device flags onto the scale's device; a nonzero
            # result signals the optimizer to skip the update.
            found_inf = cast(
                torch.Tensor,
                sum(
                    [  # noqa: C419
                        t.to(scaler.device, non_blocking=True)
                        for t in optimizer_state["found_inf_per_device"].values()
                    ]
                ),
            )
            # Take the product of the scales, if the user has already set `optimizer.grad_scale`.
            optimizer.grad_scale = (  # type: ignore[attr-defined]
                getattr(optimizer, "grad_scale", None)
                if optimizer_state["stage"] == OptState.UNSCALED
                else scaler * getattr(optimizer, "grad_scale", 1)
            )
            optimizer.found_inf = found_inf  # type: ignore[attr-defined]
        retval = optimizer.step(*args, **kwargs_)
        optimizer_state["stage"] = OptState.STEPPED
        if not has_grad_scaler_kwarg:
            # Remove the temporary attributes so they don't leak into the
            # next iteration.
            del optimizer.grad_scale  # type: ignore[attr-defined]
            del optimizer.found_inf  # type: ignore[attr-defined]
        return retval

    # Standard path: unscale now (if the caller didn't already), then step
    # only when every recorded inf-check came back clean.
    if optimizer_state["stage"] is OptState.READY:
        self.unscale_(optimizer)

    assert len(optimizer_state["found_inf_per_device"]) > 0, (
        "No inf checks were recorded for this optimizer."
    )

    retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)

    optimizer_state["stage"] = OptState.STEPPED

    return retval
def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
    """Update the scale factor.

    If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
    to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
    the scale is multiplied by ``growth_factor`` to increase it.

    Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
    used directly, it's used to fill GradScaler's internal scale tensor. So if
    ``new_scale`` was a tensor, later in-place changes to that tensor will not further
    affect the scale GradScaler uses internally.)

    Args:
        new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor.

    .. warning::
        :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
        been invoked for all optimizers used this iteration.

    .. warning::
        For performance reasons, we do not check the scale factor value to avoid synchronizations,
        so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or
        you are seeing NaNs in your gradients or loss, something is likely wrong. For example,
        bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges.
    """
    if not self._enabled:
        return

    _scale, _growth_tracker = self._check_scale_growth_tracker("update")

    if new_scale is not None:
        assert self._scale is not None
        # Accept a new user-defined scale.
        if isinstance(new_scale, float):
            self._scale.fill_(new_scale)
        else:
            reason = (
                "new_scale should be a float or a 1-element torch.cuda.FloatTensor or "
                "torch.FloatTensor with requires_grad=False."
            )
            # Validate device, shape, and requires_grad before copying the
            # user's tensor into the internal scale tensor.
            assert new_scale.device.type == self._device, reason
            assert new_scale.numel() == 1, reason
            assert new_scale.requires_grad is False, reason
            self._scale.copy_(new_scale)
    else:
        # Consume shared inf/nan data collected from optimizers to update the scale.
        # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
        found_infs = [
            found_inf.to(device=_scale.device, non_blocking=True)
            for state in self._per_optimizer_states.values()
            for found_inf in state["found_inf_per_device"].values()
        ]

        assert len(found_infs) > 0, "No inf checks were recorded prior to update."

        # Reduce all per-device flags into one tensor; any nonzero value
        # makes the kernel below back off the scale instead of growing it.
        found_inf_combined = found_infs[0]
        if len(found_infs) > 1:
            for i in range(1, len(found_infs)):
                found_inf_combined += found_infs[i]

        torch._amp_update_scale_(
            _scale,
            _growth_tracker,
            found_inf_combined,
            self._growth_factor,
            self._backoff_factor,
            self._growth_interval,
        )

    # To prepare for next iteration, clear the data collected from optimizers this iteration.
    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def _get_scale_async(self) -> Optional[torch.Tensor]:
    """Return the internal scale tensor without forcing a CPU-GPU sync.

    May be ``None`` when the lazily-initialized scale tensor has not been
    created yet (e.g. right after unpickling — see ``__getstate__``).
    """
    return self._scale
def get_scale(self) -> float:
    """Return a Python float containing the current scale, or 1.0 if scaling is disabled.

    .. warning::
        :meth:`get_scale` incurs a CPU-GPU sync.
    """
    if not self._enabled:
        return 1.0
    scale = self._get_scale_async()
    if scale is None:
        # The scale tensor has not been materialized yet; report the value
        # it will be initialized with.
        return self._init_scale
    return cast(float, scale.item())
def get_growth_factor(self) -> float:
    r"""Return the multiplicative factor applied to the scale when it grows."""
    factor = self._growth_factor
    return factor
def set_growth_factor(self, new_factor: float) -> None:
    r"""Set a new scale growth factor.

    Args:
        new_factor (float): Value to use as the new scale growth factor.
    """
    # Fix: the docstring previously documented the parameter as ``new_scale``
    # although the parameter is named ``new_factor``.
    self._growth_factor = new_factor
def get_backoff_factor(self) -> float:
    r"""Return the multiplicative factor applied to the scale when it backs off."""
    factor = self._backoff_factor
    return factor
def set_backoff_factor(self, new_factor: float) -> None:
    r"""Set a new scale backoff factor.

    Args:
        new_factor (float): Value to use as the new scale backoff factor.
    """
    # Fix: the docstring previously documented the parameter as ``new_scale``
    # although the parameter is named ``new_factor``.
    self._backoff_factor = new_factor
def get_growth_interval(self) -> int:
    r"""Return the number of consecutive unskipped steps required before the scale grows."""
    interval = self._growth_interval
    return interval
def set_growth_interval(self, new_interval: int) -> None:
    r"""Replace the growth interval.

    Args:
        new_interval (int): Number of consecutive unskipped steps required
            before the scale is grown.
    """
    self._growth_interval = new_interval
def _get_growth_tracker(self) -> int:
    """Return the growth-tracker value as a Python int (0 when scaling is disabled)."""
    if not self._enabled:
        return 0
    tracker = self._growth_tracker
    if tracker is None:
        # Tensor not materialized yet; report the value it will start from.
        return self._init_growth_tracker
    return cast(int, tracker.item())
def is_enabled(self) -> bool:
    r"""Report whether gradient scaling is active on this instance."""
    enabled = self._enabled
    return enabled
def state_dict(self) -> dict[str, Any]:
    r"""Return the state of the scaler as a :class:`dict`.

    The dict has five entries:

    * ``"scale"`` - a Python float containing the current scale
    * ``"growth_factor"`` - a Python float containing the current growth factor
    * ``"backoff_factor"`` - a Python float containing the current backoff factor
    * ``"growth_interval"`` - a Python int containing the current growth interval
    * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.

    An empty dict is returned when this instance is disabled.

    .. note::
        If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
        should be called after :meth:`update`.
    """
    if not self._enabled:
        return {}
    return {
        "scale": self.get_scale(),
        "growth_factor": self._growth_factor,
        "backoff_factor": self._backoff_factor,
        "growth_interval": self._growth_interval,
        "_growth_tracker": self._get_growth_tracker(),
    }
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
    r"""Load the scaler state.

    A no-op when this instance is disabled.

    Args:
        state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.

    Raises:
        RuntimeError: If ``state_dict`` is empty (e.g. it was saved from a
            disabled scaler).
    """
    if not self._enabled:
        return
    if not state_dict:
        raise RuntimeError(
            "The source state dict is empty, possibly because it was saved "
            "from a disabled instance of GradScaler."
        )
    scale_value = cast(float, state_dict["scale"])
    self._init_scale = scale_value
    if self._scale is not None:
        # Keep the live tensor in sync with the restored scalar.
        self._scale.fill_(scale_value)
    self._growth_factor = cast(float, state_dict["growth_factor"])
    self._backoff_factor = cast(float, state_dict["backoff_factor"])
    self._growth_interval = cast(int, state_dict["growth_interval"])
    tracker_value = cast(int, state_dict["_growth_tracker"])
    self._init_growth_tracker = tracker_value
    if self._growth_tracker is not None:
        self._growth_tracker.fill_(tracker_value)
def __getstate__(self) -> dict[str, Any]:
    """Support pickling by replacing tensor state with lazily-restorable scalars."""
    state = dict(self.__dict__)
    if not self._enabled:
        return state
    assert len(self._per_optimizer_states) == 0, (
        "A GradScaler instance may only be pickled at the beginning "
        "of an iteration, or at the end after scaler.update()."
    )
    # Pickling _scale and _growth_tracker Tensors directly triggers
    # "warnings.warn("pickle support for Storage will be removed in 1.5..."
    # so instead, we set the unpickled instance up to reinitialize them lazily.
    state.update(
        _init_scale=self.get_scale(),
        _init_growth_tracker=self._get_growth_tracker(),
        _scale=None,
        _growth_tracker=None,
    )
    return state
def __setstate__(self, state: dict[str, Any]) -> None:
    """Restore pickled state; tensor fields stay ``None`` until lazily rebuilt."""
    for key, value in state.items():
        self.__dict__[key] = value
def _check_inf_per_device(self, optimizer: torch.optim.Optimizer) -> dict[str, Any]:
    """Scan ``optimizer``'s gradients for infs/NaNs without unscaling them."""
    scale_tensor, _ = self._check_scale_growth_tracker("_check_inf_per_device")
    device = scale_tensor.device
    # A unit inverse scale leaves gradients untouched while still running
    # the inf/NaN detection pass.
    unit_inv_scale = torch.full((), 1.0, dtype=torch.float32, device=device)
    found_inf = torch.full((), 0.0, dtype=torch.float32, device=device)
    per_device = self._unscale_grads_(optimizer, unit_inv_scale, found_inf, True)
    self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = per_device
    return per_device
def _found_inf_per_device(self, optimizer: torch.optim.Optimizer) -> dict[str, Any]:
    """Return the per-device inf/NaN flags previously recorded for ``optimizer``."""
    state = self._per_optimizer_states[id(optimizer)]
    return state["found_inf_per_device"]
| GradScaler |
python | celery__celery | t/unit/concurrency/test_prefork.py | {
"start": 3887,
"end": 5135
} | class ____:
started = False
closed = False
joined = False
terminated = False
_state = None
def __init__(self, *args, **kwargs):
self.started = True
self._timeout_handler = Mock()
self._result_handler = Mock()
self.maintain_pool = Mock()
self._state = mp.RUN
self._processes = kwargs.get('processes')
self._proc_alive_timeout = kwargs.get('proc_alive_timeout')
self._pool = [Bunch(pid=i, inqW_fd=1, outqR_fd=2)
for i in range(self._processes)]
self._current_proc = cycle(range(self._processes))
def close(self):
self.closed = True
self._state = 'CLOSE'
def join(self):
self.joined = True
def terminate(self):
self.terminated = True
def terminate_job(self, *args, **kwargs):
pass
def restart(self, *args, **kwargs):
pass
def handle_result_event(self, *args, **kwargs):
pass
def flush(self):
pass
def grow(self, n=1):
self._processes += n
def shrink(self, n=1):
self._processes -= n
def apply_async(self, *args, **kwargs):
pass
def register_with_event_loop(self, loop):
pass
| MockPool |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable.py | {
"start": 3070,
"end": 3189
} | class ____:
""" Detect when using the same name inside the class scope. """
obj = Self # [undefined-variable]
| Self |
python | getsentry__sentry | src/sentry/snuba/metrics/naming_layer/mri.py | {
"start": 10516,
"end": 15256
} | class ____:
op: MetricOperationType
mri: ParsedMRI
def __str__(self) -> str:
return f"{self.op}({self.mri.name})"
def parse_mri_field(field: str | None) -> ParsedMRIField | None:
if field is None:
return None
matches = MRI_EXPRESSION_REGEX.match(field)
if matches is None:
return None
try:
op = cast(MetricOperationType, matches[1])
mri = ParsedMRI(**matches.groupdict())
except (IndexError, TypeError):
return None
return ParsedMRIField(op=op, mri=mri)
def is_mri_field(field: str) -> bool:
"""
Returns True if the passed value is an MRI field.
"""
return parse_mri_field(field) is not None
def format_mri_field(field: str) -> str:
"""
Format a metric field to be used in a metric expression.
For example, if the field is `avg(c:transactions/foo@none)`, it will be returned as `avg(foo)`.
"""
try:
parsed = parse_mri_field(field)
if parsed:
return str(parsed)
else:
return field
except InvalidParams:
return field
def format_mri_field_value(field: str, value: str) -> str:
"""
Formats MRI field value to a human-readable format using unit.
For example, if the value of avg(c:transactions/duration@second) is 60,
it will be returned as 1 minute.
"""
try:
parsed_mri_field = parse_mri_field(field)
if parsed_mri_field is None:
return value
return format_value_using_unit_and_op(
float(value), parsed_mri_field.mri.unit, parsed_mri_field.op
)
except InvalidParams:
return value
def parse_mri(mri_string: str | None) -> ParsedMRI | None:
"""
Parse a mri string to determine its entity, namespace, name and unit.
"""
if mri_string is None:
return None
match = MRI_SCHEMA_REGEX.match(mri_string)
if match is None:
return None
return ParsedMRI(**match.groupdict())
def is_mri(mri_string: str | None) -> bool:
"""
Returns true if the passed value is a mri.
"""
return parse_mri(mri_string) is not None
def is_custom_metric(parsed_mri: ParsedMRI) -> bool:
"""
A custom mri is a mri which uses the custom namespace, and it's different from a custom measurement.
"""
return parsed_mri.namespace == "custom"
def is_measurement(parsed_mri: ParsedMRI) -> bool:
"""
A measurement won't use the custom namespace, but will be under the transaction namespace.
This checks the namespace, and name to match what we consider to be a standard + custom measurement.
"""
return parsed_mri.namespace == "transactions" and parsed_mri.name.startswith("measurements.")
def is_custom_measurement(parsed_mri: ParsedMRI) -> bool:
"""
A custom measurement won't use the custom namespace, but will be under the transaction namespace.
This checks the namespace, and name to match what we expect first before iterating through the
members of the transaction MRI enum to make sure it isn't a standard measurement.
"""
return (
parsed_mri.namespace == "transactions"
and parsed_mri.name.startswith("measurements.")
and
# Iterate through the transaction MRI and check that this parsed_mri isn't in there
all(parsed_mri.mri_string != mri.value for mri in TransactionMRI.__members__.values())
)
_ENTITY_KEY_MAPPING_GENERIC: dict[str, MetricEntity] = {
"c": "generic_metrics_counters",
"s": "generic_metrics_sets",
"d": "generic_metrics_distributions",
"g": "generic_metrics_gauges",
}
_ENTITY_KEY_MAPPING_NON_GENERIC: dict[str, MetricEntity] = {
"c": "metrics_counters",
"s": "metrics_sets",
"d": "metrics_distributions",
}
def get_available_operations(parsed_mri: ParsedMRI) -> list[MetricOperationType]:
if parsed_mri.entity == "e":
return []
elif parsed_mri.namespace == "sessions":
entity_key = _ENTITY_KEY_MAPPING_NON_GENERIC[parsed_mri.entity]
return AVAILABLE_OPERATIONS[entity_key]
else:
entity_key = _ENTITY_KEY_MAPPING_GENERIC[parsed_mri.entity]
return AVAILABLE_GENERIC_OPERATIONS[entity_key]
def extract_use_case_id(mri: str) -> UseCaseID:
"""
Returns the use case ID given the MRI, throws an error if MRI is invalid or the use case doesn't exist.
"""
parsed_mri = parse_mri(mri)
if parsed_mri is not None:
if parsed_mri.namespace in {id.value for id in UseCaseID}:
return UseCaseID(parsed_mri.namespace)
raise ValidationError(f"The use case of the MRI {parsed_mri.namespace} does not exist")
raise ValidationError(f"The MRI {mri} is not valid")
| ParsedMRIField |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_table_column_count_to_equal.py | {
"start": 2021,
"end": 10527
} | class ____(BatchExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectTableColumnCountToEqual is a \
Batch Expectation.
BatchExpectations are one of the most common types of Expectation.
They are evaluated for an entire Batch, and answer a semantic question about the Batch itself.
Args:
value (int): {VALUE_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectTableColumnCountToBeBetween](https://greatexpectations.io/expectations/expect_table_column_count_to_be_between)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1.00 2
1 2.30 5
2 4.33 0
Code Examples:
Passing Case:
Input:
ExpectTableColumnCountToEqual(
value=2
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"meta": {{}},
"success": true,
"result": {{
"observed_value": 2
}}
}}
Failing Case:
Input:
ExpectTableColumnCountToEqual(
value=1
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"meta": {{}},
"success": false,
"result": {{
"observed_value": 2
}}
}}
""" # noqa: E501 # FIXME CoP
value: Union[int, SuiteParameterDict] = pydantic.Field(description=VALUE_DESCRIPTION)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "table expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("table.column_count",)
success_keys = ("value",)
args_keys = ("value",)
class Config:
title = "Expect table column count to equal"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[Expectation]) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
renderer_configuration.add_param(name="value", param_type=RendererValueType.NUMBER)
renderer_configuration.template_str = "Must have exactly $value columns."
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(configuration.kwargs, ["value"])
template_str = "Must have exactly $value columns."
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
expected_column_count = self.configuration.kwargs.get("value")
actual_column_count = metrics.get("table.column_count")
return {
"success": actual_column_count == expected_column_count,
"result": {"observed_value": actual_column_count},
}
| ExpectTableColumnCountToEqual |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/sql_join_query_engine.py | {
"start": 4305,
"end": 6471
} | class ____(BaseQueryTransform):
"""
SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm (LLM): LLM to use for query transformation.
sql_augment_transform_prompt (BasePromptTemplate): PromptTemplate to use
for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm: Optional[LLM] = None,
sql_augment_transform_prompt: Optional[BasePromptTemplate] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm = llm or Settings.llm
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"sql_augment_transform_prompt": self._sql_augment_transform_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "sql_augment_transform_prompt" in prompts:
self._sql_augment_transform_prompt = prompts["sql_augment_transform_prompt"]
def _run(self, query_bundle: QueryBundle, metadata: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = metadata["sql_query"]
sql_query_response = metadata["sql_query_response"]
new_query_str = self._llm.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
| SQLAugmentQueryTransform |
python | networkx__networkx | networkx/algorithms/tests/test_lowest_common_ancestors.py | {
"start": 11116,
"end": 14160
} | class ____(TestDAGLCA):
@classmethod
def setup_class(cls):
cls.DG = nx.MultiDiGraph()
nx.add_path(cls.DG, (0, 1, 2, 3))
# add multiedges
nx.add_path(cls.DG, (0, 1, 2, 3))
nx.add_path(cls.DG, (0, 4, 3))
nx.add_path(cls.DG, (0, 5, 6, 8, 3))
nx.add_path(cls.DG, (5, 7, 8))
cls.DG.add_edge(6, 2)
cls.DG.add_edge(7, 2)
cls.root_distance = nx.shortest_path_length(cls.DG, source=0)
cls.gold = {
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 0,
(1, 5): 0,
(1, 6): 0,
(1, 7): 0,
(1, 8): 0,
(2, 2): 2,
(2, 3): 2,
(2, 4): 0,
(2, 5): 5,
(2, 6): 6,
(2, 7): 7,
(2, 8): 7,
(3, 3): 3,
(3, 4): 4,
(3, 5): 5,
(3, 6): 6,
(3, 7): 7,
(3, 8): 8,
(4, 4): 4,
(4, 5): 0,
(4, 6): 0,
(4, 7): 0,
(4, 8): 0,
(5, 5): 5,
(5, 6): 5,
(5, 7): 5,
(5, 8): 5,
(6, 6): 6,
(6, 7): 5,
(6, 8): 6,
(7, 7): 7,
(7, 8): 7,
(8, 8): 8,
}
cls.gold.update(((0, n), 0) for n in cls.DG)
def test_all_pairs_lca_self_ancestors():
"""Self-ancestors should always be the node itself, i.e. lca of (0, 0) is 0.
See gh-4458."""
# DAG for test - note order of node/edge addition is relevant
G = nx.DiGraph()
G.add_nodes_from(range(5))
G.add_edges_from([(1, 0), (2, 0), (3, 2), (4, 1), (4, 3)])
ap_lca = nx.all_pairs_lowest_common_ancestor
assert all(u == v == a for (u, v), a in ap_lca(G) if u == v)
MG = nx.MultiDiGraph(G)
assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v)
MG.add_edges_from([(1, 0), (2, 0)])
assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v)
def test_lca_on_null_graph():
G = nx.null_graph(create_using=nx.DiGraph)
with pytest.raises(
nx.NetworkXPointlessConcept, match="LCA meaningless on null graphs"
):
nx.lowest_common_ancestor(G, 0, 0)
def test_lca_on_cycle_graph():
G = nx.cycle_graph(6, create_using=nx.DiGraph)
with pytest.raises(
nx.NetworkXError, match="LCA only defined on directed acyclic graphs"
):
nx.lowest_common_ancestor(G, 0, 3)
def test_lca_multiple_valid_solutions():
G = nx.DiGraph()
G.add_nodes_from(range(4))
G.add_edges_from([(2, 0), (3, 0), (2, 1), (3, 1)])
assert nx.lowest_common_ancestor(G, 0, 1) in {2, 3}
def test_lca_dont_rely_on_single_successor():
# Nodes 0 and 1 have nodes 2 and 3 as immediate ancestors,
# and node 2 also has node 3 as an immediate ancestor.
G = nx.DiGraph()
G.add_nodes_from(range(4))
G.add_edges_from([(2, 0), (2, 1), (3, 1), (3, 0), (3, 2)])
assert nx.lowest_common_ancestor(G, 0, 1) == 2
| TestMultiDiGraph_DAGLCA |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 18449,
"end": 19066
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.layer_norm(hidden_states + input_tensor)
return hidden_states
| TvpOutputLayer |
python | huggingface__transformers | src/transformers/models/roc_bert/modeling_roc_bert.py | {
"start": 25923,
"end": 26262
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RoCBertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@auto_docstring
| RoCBertOnlyMLMHead |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/padding.py | {
"start": 2087,
"end": 2160
} | class ____(metaclass=abc.ABCMeta):
_algorithm: hashes.HashAlgorithm
| MGF |
python | paramiko__paramiko | tests/auth.py | {
"start": 4802,
"end": 7804
} | class ____:
def _server(self, *args, **kwargs):
kwargs.setdefault("transport_factory", ServiceRequestingTransport)
return server(*args, **kwargs)
class fallback_pubkey_algorithm:
@requires_sha1_signing
def key_type_algo_selected_when_no_server_sig_algs(self):
privkey = RSAKey.from_private_key_file(_support("rsa.key"))
# Server pretending to be an apparently common setup:
# - doesn't support (or have enabled) sha2
# - also doesn't support (or have enabled) server-sig-algs/ext-info
# This is the scenario in which Paramiko has to guess-the-algo, and
# where servers that don't support sha2 or server-sig-algs can give
# us trouble.
server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
with self._server(
pubkeys=[privkey],
connect=dict(pkey=privkey),
server_init=server_init,
catch_error=True,
) as (tc, ts, err):
# Auth did work
assert tc.is_authenticated()
# Selected ssh-rsa, instead of first-in-the-list (rsa-sha2-512)
assert tc._agreed_pubkey_algorithm == "ssh-rsa"
@requires_sha1_signing
def key_type_algo_selection_is_cert_suffix_aware(self):
# This key has a cert next to it, which should trigger cert-aware
# loading within key classes.
privkey = PKey.from_path(_support("rsa.key"))
server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
with self._server(
pubkeys=[privkey],
connect=dict(pkey=privkey),
server_init=server_init,
catch_error=True,
) as (tc, ts, err):
assert not err
# Auth did work
assert tc.is_authenticated()
# Selected expected cert type
assert (
tc._agreed_pubkey_algorithm
== "ssh-rsa-cert-v01@openssh.com"
)
@requires_sha1_signing
def uses_first_preferred_algo_if_key_type_not_in_list(self):
# This is functionally the same as legacy AuthHandler, just
# arriving at the same place in a different manner.
privkey = RSAKey.from_private_key_file(_support("rsa.key"))
server_init = dict(_disable_sha2_pubkey, server_sig_algs=False)
with self._server(
pubkeys=[privkey],
connect=dict(pkey=privkey),
server_init=server_init,
client_init=_disable_sha1_pubkey, # no ssh-rsa
catch_error=True,
) as (tc, ts, err):
assert not tc.is_authenticated()
assert isinstance(err, AuthenticationException)
assert tc._agreed_pubkey_algorithm == "rsa-sha2-512"
| AuthOnlyHandler_ |
python | joke2k__faker | faker/providers/address/th/__init__.py | {
"start": 45,
"end": 5370
} | class ____(AddressProvider):
# country names adapted from
# https://github.com/PyThaiNLP/pythainlp/blob/dev/pythainlp/corpus/countries_th.txt
countries = (
"กรีซ",
"กรีนแลนด์",
"กวม",
"กัมพูชา",
"กัวเดอลุป",
"กัวเตมาลา",
"กาตาร์",
"กานา",
"กาบอง",
"กาบูเวร์ดี",
"กายอานา",
"กินี-บิสเซา",
"กินี",
"เกรเนดา",
"เกาหลีใต้",
"เกาหลีเหนือ",
"เกาะคริสต์มาส",
"เกาะนอร์ฟอล์ก",
"เกาะบูเวต",
"เกาะแมน",
"เกิร์นซีย์",
"แกมเบีย",
"โกตดิวัวร์",
"คอโมโรส",
"คอสตาริกา",
"คาซัคสถาน",
"คิริบาส",
"คิวบา",
"คีร์กีซสถาน",
"คีลิง",
"คูเวต",
"เคนยา",
"แคนาดา",
"แคเมอรูน",
"โครเอเชีย",
"โคลอมเบีย",
"จอร์เจีย",
"จอร์แดน",
"จาเมกา",
"จิบูตี",
"จีน",
"เจอร์ซีย์",
"ชาด",
"ชิลี",
"ซานมารีโน",
"ซามัว",
"ซาอุดีอาระเบีย",
"ซิมบับเว",
"ซีเรีย",
"ซูดาน",
"ซูรินาม",
"เซเชลส์",
"เซนต์คิตส์และเนวิส",
"เซนต์ลูเซีย",
"เซนต์วินเซนต์และเกรนาดีนส์",
"เซนต์เฮเลนา",
"เซเนกัล",
"เซอร์เบีย",
"เซาตูเมและปรินซิปี",
"เซียร์ราลีโอน",
"แซ็ง-บาร์เตเลมี",
"แซ็งบาร์เตเลมี",
"แซ็ง-มาร์แต็ง",
"แซ็งมาร์แต็ง",
"แซงปีแยร์และมีเกอลง",
"แซมเบีย",
"โซมาเลีย",
"ไซปรัส",
"ญี่ปุ่น",
"ดอมินีกา",
"เดนมาร์ก",
"ตรินิแดดและโตเบโก",
"ตองกา",
"ติมอร์-เลสเต",
"ติมอร์เลสเต",
"ตุรกี",
"ตูนิเซีย",
"ตูวาลู",
"เติร์กเมนิสถาน",
"โตเกเลา",
"โตโก",
"ไต้หวัน",
"ทาจิกิสถาน",
"แทนซาเนีย",
"ไทย",
"นครรัฐวาติกัน",
"นอร์เวย์",
"นามิเบีย",
"นาอูรู",
"นิการากัว",
"นิวแคลิโดเนีย",
"นิวซีแลนด์",
"นีอูเอ",
"เนเธอร์แลนด์แอนทิลลีส",
"เนเธอร์แลนด์",
"เนปาล",
"ไนจีเรีย",
"ไนเจอร์",
"บราซิล",
"บริติชอินเดียนโอเชียนเทร์ริทอรี",
"บรูไนดารุสซาลาม",
"บอตสวานา",
"บอสเนียและเฮอร์เซโกวีนา",
"บังกลาเทศ",
"บัลแกเรีย",
"บาร์เบโดส",
"บาห์เรน",
"บาฮามาส",
"บุรุนดี",
"บูร์กินาฟาโซ",
"เบนิน",
"เบลเยียม",
"เบลารุส",
"เบลีซ",
"เบอร์มิวดา",
"โบลิเวีย",
"ปากีสถาน",
"ปานามา",
"ปาปัวนิวกินี",
"ปารากวัย",
"ปาเลา",
"ปาเลสไตน์",
"เปรู",
"เปอร์โตริโก",
"โปรตุเกส",
"โปแลนด์",
"ฝรั่งเศส",
"พม่า",
"ฟิจิ",
"ฟินแลนด์",
"ฟิลิปปินส์",
"เฟรนช์เกียนา",
"เฟรนช์โปลินีเซีย",
"ภูฏาน",
"มองโกเลีย",
"มอนต์เซอร์รัต",
"มอนเตเนโกร",
"มอริเชียส",
"มอริเตเนีย",
"มอลโดวา",
"มอลตา",
"มัลดีฟส์",
"มาเก๊า",
"ประเทศมาซิโดเนียเหนือ",
"มาดากัสการ์",
"มายอต",
"มาร์ตีนิก",
"มาลาวี",
"มาลี",
"มาเลเซีย",
"เม็กซิโก",
"โมซัมบิก",
"โมนาโก",
"โมร็อกโก",
"ไมโครนีเซีย",
"ยานไมเอน",
"ยิบรอลตาร์",
"ยูกันดา",
"ยูเครน",
"เยเมน",
"เยอรมนี",
"รวันดา",
"รัสเซีย",
"เรอูว์นียง",
"โรมาเนีย",
"ลักเซมเบิร์ก",
"ลัตเวีย",
"ลาว",
"ลิกเตนสไตน์",
"ลิทัวเนีย",
"ลิเบีย",
"เลโซโท",
"เลบานอน",
"ไลบีเรีย",
"วานูอาตู",
"วาลลิสและฟุตูนา",
"เวเนซุเอลา",
"เวสเทิร์นสะฮารา",
"เวียดนาม",
"ศรีลังกา",
"สกอตแลนด์",
"สเปน",
"สฟาลบาร์",
"สโลวาเกีย",
"สโลวีเนีย",
"สวาซิแลนด์",
"สวิตเซอร์แลนด์",
"สวีเดน",
"สหรัฐอเมริกา",
"สหรัฐอาหรับเอมิเรตส์",
"สหราชอาณาจักร",
"สาธารณรัฐคองโก",
"สาธารณรัฐเช็ก",
"สาธารณรัฐโดมินิกัน",
"สิงคโปร์",
"หมู่เกาะคุก",
"หมู่เกาะเคย์แมน",
"หมู่เกาะโคโคส",
"หมู่เกาะโซโลมอน",
"หมู่เกาะบริติชเวอร์จิน",
"หมู่เกาะพิตแคร์น",
"หมู่เกาะฟอล์กแลนด์",
"หมู่เกาะแฟโร",
"หมู่เกาะมาร์แชลล์",
"อเมริกันซามัว",
"ออสเตรเลีย",
"ออสเตรีย",
"อังกฤษ",
"อันดอร์รา",
"อัฟกานิสถาน",
"อาเซอร์ไบจาน",
"อาร์เจนตินา",
"อาร์มีเนีย",
"อารูบา",
"อิเควทอเรียลกินี",
"อิตาลี",
"อินเดีย",
"อินโดนีเซีย",
"อิรัก",
"อิสราเอล",
"อิหร่าน",
"อียิปต์",
"อุซเบกิสถาน",
"อุรุกวัย",
"เอกวาดอร์",
"เอธิโอเปีย",
"เอริเทรีย",
"เอลซัลวาดอร์",
"เอสโตเนีย",
"แองกวิลลา",
"แองโกลา",
"แอฟริกากลาง",
"แอฟริกาใต้",
"แอลจีเรีย",
"แอลเบเนีย",
"โอมาน",
"ไอซ์แลนด์",
"ไอร์แลนด์",
"ไอวอรีโคสต์",
"ฮ่องกง",
"ฮอนดูรัส",
"ฮังการี",
"เฮติ",
)
| Provider |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 15189,
"end": 15245
} | class ____(FacetField, CharField):
pass
| FacetCharField |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/schema.py | {
"start": 170,
"end": 1219
} | class ____(ChatPromptTemplate):
"""Chat prompt template for the agent scratchpad."""
@classmethod
@override
def is_lc_serializable(cls) -> bool:
return False
def _construct_agent_scratchpad(
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> str:
if len(intermediate_steps) == 0:
return ""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
return (
f"This was your previous work "
f"(but I haven't seen any of it! I only see what "
f"you return as final answer):\n{thoughts}"
)
def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
intermediate_steps = kwargs.pop("intermediate_steps")
kwargs["agent_scratchpad"] = self._construct_agent_scratchpad(
intermediate_steps,
)
return kwargs
| AgentScratchPadChatPromptTemplate |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 4549,
"end": 9234
} | class ____(py_dataset_adapter.PyDataset):
def __init__(self, infinite=False, **kwargs):
super().__init__(**kwargs)
self.infinite = infinite
@property
def num_batches(self):
return None if self.infinite else 20
def __getitem__(self, idx):
CPU_DEVICES = {
"tensorflow": "CPU:0",
"jax": "cpu:0",
"torch": "cpu",
}
with backend.device(CPU_DEVICES[backend.backend()]):
return ops.ones((5, 4)), ops.zeros((5, 3))
def create_dataset(dataset_type, dataset_kwargs):
if dataset_type == "np_array":
return np.ones((100, 4)), np.zeros((100, 3))
elif dataset_type == "native_array":
return ops.ones((100, 4)), ops.zeros((100, 3))
elif dataset_type == "py_dataset":
return TestPyDataset(**dataset_kwargs), None
elif dataset_type == "tf_dataset":
import tensorflow as tf
dataset = tf.data.Dataset.from_tensor_slices(
(tf.ones((100, 4)), tf.zeros((100, 3)))
).batch(5)
if dataset_kwargs.get("infinite", False):
dataset = dataset.repeat()
return dataset, None
elif dataset_type == "torch_dataloader":
import torch
class TestIterableDataset(torch.utils.data.IterableDataset):
def __iter__(self):
for _ in range(20):
yield torch.ones((5, 4)), torch.zeros((5, 3))
class TestIterableDatasetWithLen(TestIterableDataset):
def __len__(self):
return 20
if dataset_kwargs.get("iterable", False):
if dataset_kwargs.get("has_len", False):
dataset = TestIterableDatasetWithLen()
else:
dataset = TestIterableDataset()
return torch.utils.data.DataLoader(dataset), None
else:
dataset = torch.utils.data.TensorDataset(
torch.ones((100, 4)), torch.zeros((100, 3))
)
return torch.utils.data.DataLoader(dataset, batch_size=5), None
elif dataset_type == "generator":
def generate_finite():
for _ in range(20):
yield ops.ones((5, 4)), ops.zeros((5, 3))
def generate_infinite():
while True:
yield ops.ones((5, 4)), ops.zeros((5, 3))
if dataset_kwargs.get("infinite", False):
return generate_infinite(), None
else:
return generate_finite(), None
elif dataset_type == "grain_datast":
import grain
class TestIterableDataset(grain.sources.RandomAccessDataSource):
def __init__(self):
super().__init__()
self.x = np.ones((100, 4)).astype("float32")
self.y = np.zeros((100, 3)).astype("float32")
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
if dataset_kwargs.get("use_dataloader", False):
source = TestIterableDataset()
dataloader = grain.DataLoader(
data_source=source,
sampler=grain.samplers.IndexSampler(len(source), num_epochs=1),
operations=[grain.transforms.Batch(batch_size=5)],
)
return dataloader, None
else:
dataset = grain.MapDataset.source(TestIterableDataset())
if dataset_kwargs.get("has_len", False):
dataset = dataset.to_iter_dataset()
dataset = dataset.batch(5)
return dataset, None
else:
raise ValueError(f"Invalid dataset type {dataset_type}")
def sparse_generator(generator_type):
if generator_type == "scipy":
import scipy
for _ in range(4):
x = scipy.sparse.random(2, 4, density=0.25, dtype="float32")
y = np.random.rand(2, 3).astype("float32")
yield x, y
elif generator_type == "tf":
import tensorflow as tf
for _ in range(4):
x = tf.random.uniform((2, 4), dtype="float32")
x = tf.sparse.from_dense(tf.nn.dropout(x, 0.25))
y = tf.random.uniform((2, 3), dtype="float32")
yield x, y
elif generator_type == "jax":
import jax
import jax.experimental.sparse as jax_sparse
for _ in range(4):
seed = jax.random.PRNGKey(0)
x = jax_sparse.random_bcoo(seed, (2, 4), dtype="float32", nse=0.25)
y = jax.random.uniform(seed, (2, 3), dtype="float32")
yield x, y
else:
raise ValueError(f"Invalid generator type {generator_type}")
| TestPyDataset |
python | mlflow__mlflow | mlflow/llama_index/tracer.py | {
"start": 13147,
"end": 18956
} | class ____(BaseEventHandler, extra="allow"):
"""
Event handler processes various events that are triggered during execution.
Events are used as supplemental source for recording additional metadata to the span,
such as model name, parameters to the span, because they are not available in the inputs
and outputs in SpanHandler.
"""
_span_handler: MlflowSpanHandler
@classmethod
def class_name(cls) -> str:
return "MlflowEventHandler"
def __init__(self, _span_handler):
super().__init__()
self._span_handler = _span_handler
def handle(self, event: BaseEvent) -> Any:
try:
if span := self._span_handler.get_span_for_event(event):
self._handle_event(event, span)
except Exception as e:
_logger.debug(f"Failed to handle event: {e}", exc_info=True)
@singledispatchmethod
def _handle_event(self, event: BaseEvent, span: LiveSpan):
# Pass through the events we are not interested in
pass
@_handle_event.register
def _(self, event: AgentToolCallEvent, span: LiveSpan):
span.set_attribute("name", event.tool.name)
span.set_attribute("description", event.tool.description)
span.set_attribute("parameters", event.tool.get_parameters_dict())
@_handle_event.register
def _(self, event: EmbeddingStartEvent, span: LiveSpan):
span.set_attribute("model_dict", event.model_dict)
@_handle_event.register
def _(self, event: LLMPredictStartEvent, span: LiveSpan):
"""
An event triggered when LLM's predict() is called.
In LlamaIndex, predict() is a gateway method that dispatch the request to
either chat() or completion() method depending on the model type, as well
as crafting prompt from the template.
"""
template = event.template
template_args = {
**template.kwargs,
**(event.template_args or {}),
}
span.set_attributes(
{
"prmopt_template": template.get_template(),
"template_arguments": {var: template_args.get(var) for var in template_args},
}
)
@_handle_event.register
def _(self, event: LLMCompletionStartEvent, span: LiveSpan):
span.set_attribute("prompt", event.prompt)
span.set_attribute("model_dict", event.model_dict)
@_handle_event.register
def _(self, event: LLMCompletionEndEvent, span: LiveSpan):
span.set_attribute("usage", self._extract_token_usage(event.response))
token_counts = self._parse_usage(span)
span.set_attribute(SpanAttributeKey.CHAT_USAGE, token_counts)
self._span_handler.resolve_pending_stream_span(span, event)
@_handle_event.register
def _(self, event: LLMChatStartEvent, span: LiveSpan):
span.set_attribute(SpanAttributeKey.SPAN_TYPE, SpanType.CHAT_MODEL)
span.set_attribute("model_dict", event.model_dict)
@_handle_event.register
def _(self, event: LLMChatEndEvent, span: LiveSpan):
span.set_attribute("usage", self._extract_token_usage(event.response))
token_counts = self._parse_usage(span)
span.set_attribute(SpanAttributeKey.CHAT_USAGE, token_counts)
self._span_handler.resolve_pending_stream_span(span, event)
@_handle_event.register
def _(self, event: ReRankStartEvent, span: LiveSpan):
span.set_attribute(SpanAttributeKey.SPAN_TYPE, SpanType.RERANKER)
span.set_attributes(
{
"model_name": event.model_name,
"top_n": event.top_n,
}
)
@_handle_event.register
def _(self, event: ExceptionEvent, span: LiveSpan):
"""
Handle an exception event for stream spans.
For non-stream spans, exception is processed by the prepare_to_drop_span() handler of
the span handler. However, for stream spans, the exception may raised during the
streaming after it exit. Therefore, we need to resolve the span here.
"""
self._span_handler.resolve_pending_stream_span(span, event)
def _extract_token_usage(self, response: ChatResponse | CompletionResponse) -> dict[str, int]:
if raw := response.raw:
# The raw response can be a Pydantic model or a dictionary
if isinstance(raw, pydantic.BaseModel):
raw = raw.model_dump()
if usage := raw.get("usage"):
return usage
# If the usage is not found in the raw response, look for token counts
# in additional_kwargs of the completion payload
usage = {}
if additional_kwargs := getattr(response, "additional_kwargs", None):
for k in ["prompt_tokens", "completion_tokens", "total_tokens"]:
if (v := additional_kwargs.get(k)) is not None:
usage[k] = v
return usage
def _parse_usage(self, span: LiveSpan):
try:
usage = span.get_attribute("usage")
return {
TokenUsageKey.INPUT_TOKENS: usage["prompt_tokens"],
TokenUsageKey.OUTPUT_TOKENS: usage["completion_tokens"],
TokenUsageKey.TOTAL_TOKENS: usage.get(
"total_tokens", usage["prompt_tokens"] + usage["completion_tokens"]
),
}
except Exception as e:
_logger.debug(f"Failed to set TokenUsage to the span: {e}", exc_info=True)
_StreamEndEvent = LLMChatEndEvent | LLMCompletionEndEvent | ExceptionEvent
def _get_task_step_output_type():
if _get_llama_index_version() < Version("0.13.0"):
from llama_index.core.base.agent.types import TaskStepOutput
return TaskStepOutput
return ()
| MlflowEventHandler |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 16722,
"end": 23708
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Blip2ForConditionalGeneration,) if is_torch_available() else ()
additional_model_inputs = ["input_ids"]
test_resize_embeddings = False
test_attention_outputs = False
_is_composite = True
def setUp(self):
self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self)
common_properties = ["image_token_index", "num_query_tokens", "image_text_hidden_size"]
self.config_tester = ConfigTester(
self, config_class=Blip2Config, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_for_conditional_generation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)
@unittest.skip(
reason="Blip2QFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
)
def test_eager_matches_sdpa_generate(self):
pass
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Blip2Model does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="BLIP2 has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
def test_sdpa_can_dispatch_composite_models(self):
"""
Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model.
This tests only by looking at layer names, as usually SDPA layers are called "SDPAAttention".
In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model
is loaded, because we manually replicate requested attn implementation on each sub-config when loading.
See https://github.com/huggingface/transformers/pull/32238 for more info
The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model
that has a different set of sub-configs has to overwrite this test.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(model.language_model.config._attn_implementation == "sdpa")
self.assertTrue(model.vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model.qformer.config._attn_implementation == "eager")
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.qformer.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if (
class_name.endswith("Attention")
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "sdpa"
):
raise ValueError("The eager model should not have SDPA attention layers")
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_load_vision_qformer_text_config(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
# Save Blip2Config and check if we can load Blip2VisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save Blip2Config and check if we can load Blip2QFormerConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "Salesforce/blip2-opt-2.7b"
model = Blip2ForConditionalGeneration.from_pretrained(model_name)
self.assertIsNotNone(model)
# overwrite because BLIP internally calls LM.generate() with embeds thus it cannot operate in no cache format
def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1):
use_cache = True # force this to be True in case False is passed
super()._check_generate_outputs(
output, config, use_cache=use_cache, num_return_sequences=num_return_sequences, num_beams=num_beams
)
# this class is based on `T5ModelTester` found in tests/models/t5/test_modeling_t5.py
| Blip2ForConditionalGenerationDecoderOnlyTest |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/parsing/base.py | {
"start": 360,
"end": 2678
} | class ____(StringEvaluator):
"""Evaluate whether the prediction is valid JSON.
This evaluator checks if the prediction is a valid JSON string. It does not
require any input or reference.
Attributes:
requires_input: Whether this evaluator requires an input
string. Always False.
requires_reference: Whether this evaluator requires a
reference string. Always False.
evaluation_name: The name of the evaluation metric.
Always "json".
Examples:
>>> evaluator = JsonValidityEvaluator()
>>> prediction = '{"name": "John", "age": 30, "city": "New York"}'
>>> evaluator.evaluate(prediction)
{'score': 1}
>>> prediction = '{"name": "John", "age": 30, "city": "New York",}'
>>> evaluator.evaluate(prediction)
{'score': 0, 'reasoning': 'Expecting property name enclosed in double quotes'}
"""
def __init__(self, **_: Any) -> None:
"""Initialize the JsonValidityEvaluator."""
super().__init__()
@property
@override
def requires_input(self) -> bool:
return False
@property
@override
def requires_reference(self) -> bool:
return False
@property
@override
def evaluation_name(self) -> str:
return "json_validity"
@override
def _evaluate_strings(
self,
prediction: str,
**kwargs: Any,
) -> dict:
"""Evaluate the prediction string.
Args:
prediction: The prediction string to evaluate.
**kwargs: Additional keyword arguments (not used).
Returns:
`dict` containing the evaluation score. The score is `1` if
the prediction is valid JSON, and `0` otherwise.
If the prediction is not valid JSON, the dictionary also contains
a `reasoning` field with the error message.
"""
try:
parse_json_markdown(prediction, parser=json.loads)
except json.JSONDecodeError as e:
return {"score": 0, "reasoning": str(e)}
except Exception as e:
_logger.exception("Passing JSON failed with unexpected error.")
return {"score": 0, "reasoning": str(e)}
return {"score": 1}
| JsonValidityEvaluator |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/mapping/__init__.py | {
"start": 1511,
"end": 3532
} | class ____:
"""
A mapping between a page or link and a set of tags.
We use this class to store the mapping between a page or link and a set of
tags. This is necessary as we don't want to store the tags directly on the
page or link object, in order not to clutter the internal data structures
of MkDocs, keeping the plugin as unobtrusive as possible.
Links are primarily used when integrating with tags from external projects,
as we can't construct a page object for them as we do for local files.
"""
def __init__(self, item: Page | Link, *, tags: Iterable[Tag] | None = None):
"""
Initialize the mapping.
Tags can be passed upon initialization, but can also be added later on
using the `add` or `update` method. of the `tags` attribute.
Arguments:
item: The page or link.
tags: The tags associated with the page or link.
"""
self.item = item
self.tags = set(tags or [])
def __repr__(self) -> str:
"""
Return a printable representation of the mapping.
Returns:
Printable representation.
"""
return f"Mapping({repr(self.item)}, tags={self.tags})"
def __and__(self, tags: set[Tag]) -> Iterator[Tag]:
"""
Iterate over the tags featured in the mapping.
This method expands each tag in the mapping and checks whether it is
equal to one of the tags in the given set. If so, the tag is yielded.
Arguments:
tags: The set of tags.
Yields:
The current tag.
"""
assert isinstance(tags, set)
# Iterate over expanded tags
for tag in self.tags:
if set(tag) & tags:
yield tag
# -------------------------------------------------------------------------
item: Page | Link
"""
The page or link.
"""
tags: set[Tag]
"""
The tags associated with the page or link.
"""
| Mapping |
python | apache__airflow | providers/google/tests/unit/google/common/auth_backend/test_google_openid.py | {
"start": 3026,
"end": 6008
} | class ____:
@pytest.fixture(autouse=True)
def _set_attrs(self, google_openid_app, admin_user) -> None:
self.app = google_openid_app
self.admin_user = admin_user
@mock.patch("google.oauth2.id_token.verify_token")
def test_success(self, mock_verify_token):
mock_verify_token.return_value = {
"iss": "accounts.google.com",
"email_verified": True,
"email": "test@fab.org",
}
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users", headers={"Authorization": "bearer JWT_TOKEN"})
assert response.status_code == 200
@pytest.mark.parametrize("auth_header", ["bearer", "JWT_TOKEN", "bearer "])
@mock.patch("google.oauth2.id_token.verify_token")
def test_malformed_headers(self, mock_verify_token, auth_header):
mock_verify_token.return_value = {
"iss": "accounts.google.com",
"email_verified": True,
"email": "test@fab.org",
}
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users", headers={"Authorization": auth_header})
assert response.status_code == 401
@mock.patch("google.oauth2.id_token.verify_token")
def test_invalid_iss_in_jwt_token(self, mock_verify_token):
mock_verify_token.return_value = {
"iss": "INVALID",
"email_verified": True,
"email": "test@fab.org",
}
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users", headers={"Authorization": "bearer JWT_TOKEN"})
assert response.status_code == 401
@mock.patch("google.oauth2.id_token.verify_token")
def test_user_not_exists(self, mock_verify_token):
mock_verify_token.return_value = {
"iss": "accounts.google.com",
"email_verified": True,
"email": "invalid@fab.org",
}
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users", headers={"Authorization": "bearer JWT_TOKEN"})
assert response.status_code == 401
@conf_vars({("fab", "auth_backends"): "airflow.providers.google.common.auth_backend.google_openid"})
def test_missing_id_token(self):
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users")
assert response.status_code == 401
@conf_vars({("fab", "auth_backends"): "airflow.providers.google.common.auth_backend.google_openid"})
@mock.patch("google.oauth2.id_token.verify_token")
def test_invalid_id_token(self, mock_verify_token):
mock_verify_token.side_effect = GoogleAuthError("Invalid token")
with self.app.test_client() as test_client:
response = test_client.get("/fab/v1/users", headers={"Authorization": "bearer JWT_TOKEN"})
assert response.status_code == 401
| TestGoogleOpenID |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1403089,
"end": 1403305
} | class ____(SingleTimeUnit):
"""LocalSingleTimeUnit schema wrapper."""
_schema = {"$ref": "#/definitions/LocalSingleTimeUnit"}
def __init__(self, *args):
super().__init__(*args)
| LocalSingleTimeUnit |
python | openai__openai-python | src/openai/resources/beta/threads/threads.py | {
"start": 94781,
"end": 96187
} | class ____:
def __init__(self, threads: Threads) -> None:
self._threads = threads
self.create = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
threads.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
threads.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
threads.update, # pyright: ignore[reportDeprecated],
)
)
self.delete = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
threads.delete, # pyright: ignore[reportDeprecated],
)
)
self.create_and_run = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
threads.create_and_run, # pyright: ignore[reportDeprecated],
)
)
@cached_property
def runs(self) -> RunsWithStreamingResponse:
return RunsWithStreamingResponse(self._threads.runs)
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
return MessagesWithStreamingResponse(self._threads.messages)
| ThreadsWithStreamingResponse |
python | ray-project__ray | rllib/examples/rl_modules/classes/vpg_using_shared_encoder_rlm.py | {
"start": 1618,
"end": 7190
} | class ____(MultiRLModule):
"""VPG (vanilla pol. gradient)-style MultiRLModule handling a shared encoder.
# __sphinx_doc_mrlm_end__
This MultiRLModule needs to be configured appropriately as below.
.. testcode::
# __sphinx_doc_how_to_run_begin__
import gymnasium as gym
from ray.rllib.core.rl_module.rl_module import RLModuleSpec
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
from ray.rllib.examples.algorithms.classes.vpg import VPGConfig
from ray.rllib.examples.learners.classes.vpg_torch_learner_shared_optimizer import VPGTorchLearnerSharedOptimizer
from ray.rllib.examples.envs.classes.multi_agent import MultiAgentCartPole
from ray.rllib.examples.rl_modules.classes.vpg_using_shared_encoder_rlm import (
SHARED_ENCODER_ID,
SharedEncoder,
VPGPolicyAfterSharedEncoder,
VPGMultiRLModuleWithSharedEncoder,
)
single_agent_env = gym.make("CartPole-v1")
EMBEDDING_DIM = 64 # encoder output dim
config = (
VPGConfig()
.environment(MultiAgentCartPole, env_config={"num_agents": 2})
.training(
learner_class=VPGTorchLearnerSharedOptimizer,
)
.multi_agent(
# Declare the two policies trained.
policies={"p0", "p1"},
# Agent IDs of `MultiAgentCartPole` are 0 and 1. They are mapped to
# the two policies with ModuleIDs "p0" and "p1", respectively.
policy_mapping_fn=lambda agent_id, episode, **kw: f"p{agent_id}"
)
.rl_module(
rl_module_spec=MultiRLModuleSpec(
multi_rl_module_class=VPGMultiRLModuleWithSharedEncoder,
rl_module_specs={
# Shared encoder.
SHARED_ENCODER_ID: RLModuleSpec(
module_class=SharedEncoder,
model_config={"embedding_dim": EMBEDDING_DIM},
observation_space=single_agent_env.observation_space,
action_space=single_agent_env.action_space,
),
# Large policy net.
"p0": RLModuleSpec(
module_class=VPGPolicyAfterSharedEncoder,
model_config={
"embedding_dim": EMBEDDING_DIM,
"hidden_dim": 1024,
},
),
# Small policy net.
"p1": RLModuleSpec(
module_class=VPGPolicyAfterSharedEncoder,
model_config={
"embedding_dim": EMBEDDING_DIM,
"hidden_dim": 64,
},
),
},
),
)
)
algo = config.build_algo()
print(algo.train())
# __sphinx_doc_how_to_run_end__
# __sphinx_doc_mrlm_2_begin__
"""
def setup(self):
# Call the super's setup().
super().setup()
# Assert, we have the shared encoder submodule.
assert SHARED_ENCODER_ID in self._rl_modules and len(self._rl_modules) > 1
# Assign the encoder to a convenience attribute.
self.encoder = self._rl_modules[SHARED_ENCODER_ID]
def _forward(self, batch, forward_type, **kwargs):
# Collect our policies' outputs in this dict.
fwd_out = {}
# Loop through the policy nets (through the given batch's keys).
for policy_id, policy_batch in batch.items():
# Feed this policy's observation into the shared encoder
encoder_output = self.encoder._forward(batch[policy_id])
policy_batch[ENCODER_OUT] = encoder_output[ENCODER_OUT]
# Get the desired module
m = getattr(self._rl_modules[policy_id], forward_type)
# Pass the policy's embeddings through the policy net.
fwd_out[policy_id] = m(batch[policy_id], **kwargs)
return fwd_out
# These methods could probably stand to be adjusted in MultiRLModule using something like this, so that subclasses that tweak _forward don't need to rewrite all of them. The prior implementation errored out because of this issue.
@override(MultiRLModule)
def _forward_inference(
self, batch: Dict[str, Any], **kwargs
) -> Union[Dict[str, Any], Dict[ModuleID, Dict[str, Any]]]:
return self._forward(batch, "_forward_inference", **kwargs)
@override(MultiRLModule)
def _forward_exploration(
self, batch: Dict[str, Any], **kwargs
) -> Union[Dict[str, Any], Dict[ModuleID, Dict[str, Any]]]:
return self._forward(batch, "_forward_exploration", **kwargs)
@override(MultiRLModule)
def _forward_train(
self, batch: Dict[str, Any], **kwargs
) -> Union[Dict[str, Any], Dict[ModuleID, Dict[str, Any]]]:
return self._forward(batch, "_forward_train", **kwargs)
# __sphinx_doc_mrlm_2_end__
# __sphinx_doc_encoder_begin__
| VPGMultiRLModuleWithSharedEncoder |
python | getsentry__sentry | src/sentry/integrations/bitbucket/search.py | {
"start": 956,
"end": 3338
} | class ____(SourceCodeSearchEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
@property
def repository_field(self) -> str:
return "repo"
@property
def integration_provider(self):
return IntegrationProviderSlug.BITBUCKET.value
@property
def installation_class(self):
return BitbucketIntegration
def handle_search_issues(self, installation: T, query: str, repo: str | None) -> Response:
with self.record_event(
SCMIntegrationInteractionType.HANDLE_SEARCH_ISSUES,
organization_id=installation.organization_id,
integration_id=installation.org_integration.integration_id,
).capture() as lifecycle:
assert repo
full_query = f'title~"{query}"'
try:
response = installation.search_issues(query=full_query, repo=repo)
except ApiError as e:
if "no issue tracker" in str(e):
lifecycle.record_halt(str(SourceCodeSearchEndpointHaltReason.NO_ISSUE_TRACKER))
return Response(
{"detail": "Bitbucket Repository has no issue tracker."}, status=400
)
elif "resource not found" in str(e):
lifecycle.record_halt(
str(SourceCodeSearchEndpointHaltReason.MISSING_REPOSITORY_OR_NO_ACCESS)
)
return Response({"detail": "Bitbucket Repository not found."}, status=400)
raise
assert isinstance(response, dict)
return Response(
[
{"label": "#{} {}".format(i["id"], i["title"]), "value": i["id"]}
for i in response.get("values", [])
]
)
def handle_search_repositories(
self, integration: Integration, installation: T, query: str
) -> Response:
with self.record_event(
SCMIntegrationInteractionType.HANDLE_SEARCH_REPOSITORIES,
organization_id=installation.organization_id,
integration_id=integration.id,
).capture():
result = installation.get_repositories(query)
return Response([{"label": i["name"], "value": i["name"]} for i in result])
| BitbucketSearchEndpoint |
python | huggingface__transformers | src/transformers/models/umt5/modeling_umt5.py | {
"start": 49417,
"end": 58985
} | class ____(UMT5PreTrainedModel, GenerationMixin):
r"""
Examples:
```python
>>> from transformers import UMT5ForConditionalGeneration, AutoTokenizer
>>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
>>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
>>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
>>> summary = "Weiter Verhandlung in Syrien."
>>> inputs = tokenizer(article, text_target=summary, return_tensors="pt")
>>> outputs = model(**inputs)
>>> loss = outputs.loss
```"""
model_type = "umt5"
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
"lm_head.weight": "shared.weight",
}
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.tie_encoder_decoder = False
self.encoder = UMT5Stack(encoder_config)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.tie_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = UMT5Stack(decoder_config)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings
def get_input_embeddings(self):
return self.shared
# Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so
you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5
Training](./umt5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, UMT5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
>>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
>>> # training
>>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
>>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> # inference
>>> input_ids = tokenizer("Studies have shown that <extra_id_0> good for you", return_tensors="pt").input_ids
>>> outputs = model.generate(input_ids)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
# move labels to correct device to enable PP
labels = labels.to(lm_logits.device)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
@auto_docstring
| UMT5ForConditionalGeneration |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/scroll_to_center.py | {
"start": 143,
"end": 1110
} | class ____(App[None]):
AUTO_FOCUS = ""
CSS = """
VerticalScroll {
border: round $primary;
}
#vertical {
height: 21;
}
HorizontalScroll {
border: round $accent;
height: auto;
}
Label {
height: auto;
width: auto;
}
"""
def compose(self) -> ComposeResult:
yield Label(("SPAM\n" * 53)[:-1])
with VerticalScroll(id="vertical"):
yield Label(("SPAM\n" * 78)[:-1])
with HorizontalScroll():
yield Label(("v\n" * 17)[:-1])
yield Label("@" * 302)
yield Label("[red]>>bullseye<<[/red]", id="bullseye")
yield Label("@" * 99)
yield Label(("SPAM\n" * 49)[:-1])
yield Label(("SPAM\n" * 51)[:-1])
def key_s(self) -> None:
self.screen.scroll_to_center(self.query_one("#bullseye"), origin_visible=False)
if __name__ == "__main__":
MyApp().run()
| MyApp |
python | huggingface__transformers | tests/models/plbart/test_modeling_plbart.py | {
"start": 14042,
"end": 16856
} | class ____(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "uclanlp/plbart-java-cs"
src_text = [
"public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}",
"public int product(int a, int b, int c){return a*b*c;}",
]
tgt_text = [
"public int maximum(int a, int b, int c){return Math.Max(",
"public int Product(int a, int b, int c){return a * b *",
]
@slow
def test_java_cs_generate_one(self):
batch = self.tokenizer(
["public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}"], return_tensors="pt"
)
batch = batch.to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
# self.assertEqual(self.tgt_text[1], decoded[1])
@slow
def test_java_cs_generate_batch(self):
batch = self.tokenizer(self.src_text, return_tensors="pt", padding=True, truncation=True)
batch = batch.to(torch_device)
translated_tokens = self.model.generate(**batch)
decoded = self.tokenizer.decode(translated_tokens, skip_special_tokens=True)
assert self.tgt_text == decoded
def test_plbart_java_cs_config(self):
plbart_models = ["uclanlp/plbart-java-cs"]
expected = {"scale_embedding": True}
for name in plbart_models:
config = PLBartConfig.from_pretrained(name)
for k, v in expected.items():
try:
self.assertEqual(v, getattr(config, k))
except AssertionError as e:
e.args += (name, k)
raise
def test_plbart_fast_forward(self):
config = PLBartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
lm_model = PLBartForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(result.logits.shape, expected_shape)
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
| PLBartJavaCsIntegrationTest |
python | getsentry__sentry | src/sentry/auth/providers/saml2/rippling/provider.py | {
"start": 466,
"end": 1049
} | class ____(AuthView):
"""
Rippling provides the Metadata URL during initial application setup, before
configuration values have been saved, thus we cannot immediately attempt to
create an identity for the setting up the SSO.
This is simply an extra step to wait for them to complete that.
"""
def handle(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase:
if "continue_setup" in request.POST:
return pipeline.next_step()
return self.respond("sentry_auth_rippling/wait-for-completion.html")
| WaitForCompletion |
python | zarr-developers__zarr-python | src/zarr/codecs/vlen_utf8.py | {
"start": 557,
"end": 2279
} | class ____(ArrayBytesCodec):
"""Variable-length UTF8 codec"""
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
_, configuration_parsed = parse_named_configuration(
data, "vlen-utf8", require_configuration=False
)
configuration_parsed = configuration_parsed or {}
return cls(**configuration_parsed)
def to_dict(self) -> dict[str, JSON]:
return {"name": "vlen-utf8", "configuration": {}}
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
return self
# TODO: expand the tests for this function
async def _decode_single(
self,
chunk_bytes: Buffer,
chunk_spec: ArraySpec,
) -> NDBuffer:
assert isinstance(chunk_bytes, Buffer)
raw_bytes = chunk_bytes.as_array_like()
decoded = _vlen_utf8_codec.decode(raw_bytes)
assert decoded.dtype == np.object_
decoded.shape = chunk_spec.shape
as_string_dtype = decoded.astype(chunk_spec.dtype.to_native_dtype(), copy=False)
return chunk_spec.prototype.nd_buffer.from_numpy_array(as_string_dtype)
async def _encode_single(
self,
chunk_array: NDBuffer,
chunk_spec: ArraySpec,
) -> Buffer | None:
assert isinstance(chunk_array, NDBuffer)
return chunk_spec.prototype.buffer.from_bytes(
_vlen_utf8_codec.encode(chunk_array.as_numpy_array())
)
def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int:
# what is input_byte_length for an object dtype?
raise NotImplementedError("compute_encoded_size is not implemented for VLen codecs")
@dataclass(frozen=True)
| VLenUTF8Codec |
python | django__django | django/db/backends/ddl_references.py | {
"start": 3144,
"end": 3512
} | class ____(TableColumns):
"""Hold a reference to an index name."""
def __init__(self, table, columns, suffix, create_index_name):
self.suffix = suffix
self.create_index_name = create_index_name
super().__init__(table, columns)
def __str__(self):
return self.create_index_name(self.table, self.columns, self.suffix)
| IndexName |
python | jazzband__django-formtools | tests/forms.py | {
"start": 27,
"end": 211
} | class ____(forms.Form):
field1 = forms.CharField()
field1_ = forms.CharField()
bool1 = forms.BooleanField(required=False)
date1 = forms.DateField(required=False)
| TestForm |
python | fastapi__sqlmodel | docs_src/tutorial/automatic_id_none_refresh/tutorial001_py310.py | {
"start": 63,
"end": 2049
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str
secret_name: str
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
print("Before interacting with the database")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
print("After adding to the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
session.commit()
print("After committing the session")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After committing the session, show IDs")
print("Hero 1 ID:", hero_1.id)
print("Hero 2 ID:", hero_2.id)
print("Hero 3 ID:", hero_3.id)
print("After committing the session, show names")
print("Hero 1 name:", hero_1.name)
print("Hero 2 name:", hero_2.name)
print("Hero 3 name:", hero_3.name)
session.refresh(hero_1)
session.refresh(hero_2)
session.refresh(hero_3)
print("After refreshing the heroes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
print("After the session closes")
print("Hero 1:", hero_1)
print("Hero 2:", hero_2)
print("Hero 3:", hero_3)
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | scipy__scipy | scipy/_lib/tests/test__util.py | {
"start": 8252,
"end": 11496
} | class ____:
# check that wrapper `_rename_parameter` for backward-compatible
# keyword renaming works correctly
# Example method/function that still accepts keyword `old`
@_rename_parameter("old", "new")
def old_keyword_still_accepted(self, new):
return new
# Example method/function for which keyword `old` is deprecated
@_rename_parameter("old", "new", dep_version="1.9.0")
def old_keyword_deprecated(self, new):
return new
def test_old_keyword_still_accepted(self):
# positional argument and both keyword work identically
res1 = self.old_keyword_still_accepted(10)
res2 = self.old_keyword_still_accepted(new=10)
res3 = self.old_keyword_still_accepted(old=10)
assert res1 == res2 == res3 == 10
# unexpected keyword raises an error
message = re.escape("old_keyword_still_accepted() got an unexpected")
with pytest.raises(TypeError, match=message):
self.old_keyword_still_accepted(unexpected=10)
# multiple values for the same parameter raises an error
message = re.escape("old_keyword_still_accepted() got multiple")
with pytest.raises(TypeError, match=message):
self.old_keyword_still_accepted(10, new=10)
with pytest.raises(TypeError, match=message):
self.old_keyword_still_accepted(10, old=10)
with pytest.raises(TypeError, match=message):
self.old_keyword_still_accepted(new=10, old=10)
@pytest.fixture
def kwarg_lock(self):
from threading import Lock
return Lock()
def test_old_keyword_deprecated(self, kwarg_lock):
# positional argument and both keyword work identically,
# but use of old keyword results in DeprecationWarning
dep_msg = "Use of keyword argument `old` is deprecated"
res1 = self.old_keyword_deprecated(10)
res2 = self.old_keyword_deprecated(new=10)
# pytest warning filter is not thread-safe, enforce serialization
with kwarg_lock:
with pytest.warns(DeprecationWarning, match=dep_msg):
res3 = self.old_keyword_deprecated(old=10)
assert res1 == res2 == res3 == 10
# unexpected keyword raises an error
message = re.escape("old_keyword_deprecated() got an unexpected")
with pytest.raises(TypeError, match=message):
self.old_keyword_deprecated(unexpected=10)
# multiple values for the same parameter raises an error and,
# if old keyword is used, results in DeprecationWarning
message = re.escape("old_keyword_deprecated() got multiple")
with pytest.raises(TypeError, match=message):
self.old_keyword_deprecated(10, new=10)
with kwarg_lock:
with pytest.raises(TypeError, match=message), \
pytest.warns(DeprecationWarning, match=dep_msg):
# breakpoint()
self.old_keyword_deprecated(10, old=10)
with kwarg_lock:
with pytest.raises(TypeError, match=message), \
pytest.warns(DeprecationWarning, match=dep_msg):
self.old_keyword_deprecated(new=10, old=10)
| TestRenameParameter |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 21682,
"end": 22022
} | class ____(PackageTemplate):
"""Provides appropriate overrides for licensed Intel software"""
base_class_name = "IntelOneApiPackage"
package_class_import = "from spack_repo.builtin.build_systems.oneapi import IntelOneApiPackage"
body_def = """\
# FIXME: Override `setup_environment` if necessary."""
| IntelPackageTemplate |
python | kamyu104__LeetCode-Solutions | Python/matchsticks-to-square.py | {
"start": 114,
"end": 1143
} | class ____(object):
def makesquare(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
total_len = sum(nums)
if total_len % 4:
return False
side_len = total_len / 4
fullset = (1 << len(nums)) - 1
used_subsets = []
valid_half_subsets = [0] * (1 << len(nums))
for subset in xrange(fullset+1):
subset_total_len = 0
for i in xrange(len(nums)):
if subset & (1 << i):
subset_total_len += nums[i]
if subset_total_len == side_len:
for used_subset in used_subsets:
if (used_subset & subset) == 0:
valid_half_subset = used_subset | subset
valid_half_subsets[valid_half_subset] = True
if valid_half_subsets[fullset ^ valid_half_subset]:
return True
used_subsets.append(subset)
return False
| Solution |
python | getsentry__sentry-python | sentry_sdk/scrubber.py | {
"start": 1198,
"end": 6064
} | class ____:
def __init__(
self, denylist=None, recursive=False, send_default_pii=False, pii_denylist=None
):
# type: (Optional[List[str]], bool, bool, Optional[List[str]]) -> None
"""
A scrubber that goes through the event payload and removes sensitive data configured through denylists.
:param denylist: A security denylist that is always scrubbed, defaults to DEFAULT_DENYLIST.
:param recursive: Whether to scrub the event payload recursively, default False.
:param send_default_pii: Whether pii is sending is on, pii fields are not scrubbed.
:param pii_denylist: The denylist to use for scrubbing when pii is not sent, defaults to DEFAULT_PII_DENYLIST.
"""
self.denylist = DEFAULT_DENYLIST.copy() if denylist is None else denylist
if not send_default_pii:
pii_denylist = (
DEFAULT_PII_DENYLIST.copy() if pii_denylist is None else pii_denylist
)
self.denylist += pii_denylist
self.denylist = [x.lower() for x in self.denylist]
self.recursive = recursive
def scrub_list(self, lst):
# type: (object) -> None
"""
If a list is passed to this method, the method recursively searches the list and any
nested lists for any dictionaries. The method calls scrub_dict on all dictionaries
it finds.
If the parameter passed to this method is not a list, the method does nothing.
"""
if not isinstance(lst, list):
return
for v in lst:
self.scrub_dict(v) # no-op unless v is a dict
self.scrub_list(v) # no-op unless v is a list
def scrub_dict(self, d):
# type: (object) -> None
"""
If a dictionary is passed to this method, the method scrubs the dictionary of any
sensitive data. The method calls itself recursively on any nested dictionaries (
including dictionaries nested in lists) if self.recursive is True.
This method does nothing if the parameter passed to it is not a dictionary.
"""
if not isinstance(d, dict):
return
for k, v in d.items():
# The cast is needed because mypy is not smart enough to figure out that k must be a
# string after the isinstance check.
if isinstance(k, str) and k.lower() in self.denylist:
d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
elif self.recursive:
self.scrub_dict(v) # no-op unless v is a dict
self.scrub_list(v) # no-op unless v is a list
def scrub_request(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
if "request" in event:
if "headers" in event["request"]:
self.scrub_dict(event["request"]["headers"])
if "cookies" in event["request"]:
self.scrub_dict(event["request"]["cookies"])
if "data" in event["request"]:
self.scrub_dict(event["request"]["data"])
def scrub_extra(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
if "extra" in event:
self.scrub_dict(event["extra"])
def scrub_user(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
if "user" in event:
self.scrub_dict(event["user"])
def scrub_breadcrumbs(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
if "breadcrumbs" in event:
if (
not isinstance(event["breadcrumbs"], AnnotatedValue)
and "values" in event["breadcrumbs"]
):
for value in event["breadcrumbs"]["values"]:
if "data" in value:
self.scrub_dict(value["data"])
def scrub_frames(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
for frame in iter_event_frames(event):
if "vars" in frame:
self.scrub_dict(frame["vars"])
def scrub_spans(self, event):
# type: (Event) -> None
with capture_internal_exceptions():
if "spans" in event:
for span in cast(List[Dict[str, object]], event["spans"]):
if "data" in span:
self.scrub_dict(span["data"])
def scrub_event(self, event):
# type: (Event) -> None
self.scrub_request(event)
self.scrub_extra(event)
self.scrub_user(event)
self.scrub_breadcrumbs(event)
self.scrub_frames(event)
self.scrub_spans(event)
| EventScrubber |
python | huggingface__transformers | src/transformers/cache_utils.py | {
"start": 61676,
"end": 62095
} | class ____(StaticCache):
def __init__(self, config: PreTrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once(
"`SlidingWindowCache` is deprecated and will be removed in version v4.59 "
"Use `StaticCache(...)` instead which will correctly infer the type of each layer."
)
super().__init__(config=config, max_cache_len=max_cache_len)
| SlidingWindowCache |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 120168,
"end": 124449
} | class ____(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
# Ported from ``setuptools`` to avoid introducing an import inter-dependency:
_LOCALE_ENCODING = "locale" if sys.version_info >= (3, 10) else None
def _read_utf8_with_fallback(file: str, fallback_encoding=_LOCALE_ENCODING) -> str:
"""See setuptools.unicode_utils._read_utf8_with_fallback"""
try:
with open(file, "r", encoding="utf-8") as f:
return f.read()
except UnicodeDecodeError: # pragma: no cover
msg = f"""\
********************************************************************************
`encoding="utf-8"` fails with {file!r}, trying `encoding={fallback_encoding!r}`.
This fallback behaviour is considered **deprecated** and future versions of
`setuptools/pkg_resources` may not implement it.
Please encode {file!r} with "utf-8" to ensure future builds will succeed.
If this file was produced by `setuptools` itself, cleaning up the cached files
and re-building/re-installing the package with a newer version of `setuptools`
(e.g. by updating `build-system.requires` in its `pyproject.toml`)
might solve the problem.
********************************************************************************
"""
# TODO: Add a deadline?
# See comment in setuptools.unicode_utils._Utf8EncodingNeeded
warnings.warn(msg, PkgResourcesDeprecationWarning, stacklevel=2)
with open(file, "r", encoding=fallback_encoding) as f:
return f.read()
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = _declare_state('object', 'working_set', WorkingSet._build_master())
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(dist.activate(replace=False) for dist in working_set)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
if TYPE_CHECKING:
# All of these are set by the @_call_aside methods above
__resource_manager = ResourceManager() # Won't exist at runtime
resource_exists = __resource_manager.resource_exists
resource_isdir = __resource_manager.resource_isdir
resource_filename = __resource_manager.resource_filename
resource_stream = __resource_manager.resource_stream
resource_string = __resource_manager.resource_string
resource_listdir = __resource_manager.resource_listdir
set_extraction_path = __resource_manager.set_extraction_path
cleanup_resources = __resource_manager.cleanup_resources
working_set = WorkingSet()
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script
| PkgResourcesDeprecationWarning |
python | pypa__pip | src/pip/_vendor/cachecontrol/heuristics.py | {
"start": 3036,
"end": 4881
} | class ____(BaseHeuristic):
"""
If there is no Expires header already, fall back on Last-Modified
using the heuristic from
http://tools.ietf.org/html/rfc7234#section-4.2.2
to calculate a reasonable value.
Firefox also does something like this per
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
Unlike mozilla we limit this to 24-hr.
"""
cacheable_by_default_statuses = {
200,
203,
204,
206,
300,
301,
404,
405,
410,
414,
501,
}
def update_headers(self, resp: HTTPResponse) -> dict[str, str]:
headers: Mapping[str, str] = resp.headers
if "expires" in headers:
return {}
if "cache-control" in headers and headers["cache-control"] != "public":
return {}
if resp.status not in self.cacheable_by_default_statuses:
return {}
if "date" not in headers or "last-modified" not in headers:
return {}
time_tuple = parsedate_tz(headers["date"])
assert time_tuple is not None
date = calendar.timegm(time_tuple[:6])
last_modified = parsedate(headers["last-modified"])
if last_modified is None:
return {}
now = time.time()
current_age = max(0, now - date)
delta = date - calendar.timegm(last_modified)
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
if freshness_lifetime <= current_age:
return {}
expires = date + freshness_lifetime
return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}
def warning(self, resp: HTTPResponse) -> str | None:
return None
| LastModified |
python | doocs__leetcode | lcci/17.08.Circus Tower/Solution.py | {
"start": 365,
"end": 855
} | class ____:
def bestSeqAtIndex(self, height: List[int], weight: List[int]) -> int:
arr = list(zip(height, weight))
arr.sort(key=lambda x: (x[0], -x[1]))
alls = sorted({w for _, w in arr})
m = {w: i for i, w in enumerate(alls, 1)}
tree = BinaryIndexedTree(len(m))
ans = 1
for _, w in arr:
x = m[w]
t = tree.query(x - 1) + 1
ans = max(ans, t)
tree.update(x, t)
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-nvidia-tensorrt/llama_index/llms/nvidia_tensorrt/base.py | {
"start": 2455,
"end": 15013
} | class ____(CustomLLM):
r"""
Local TensorRT LLM.
[TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) provides users with an easy-to-use Python API to define Large Language Models (LLMs) and build TensorRT engines that contain state-of-the-art optimizations to perform inference
efficiently on NVIDIA GPUs.
Since TensorRT-LLM is a SDK for interacting with local models in process there are a few environment steps that must be followed to ensure that the TensorRT-LLM setup can be used.
1. Nvidia Cuda 12.2 or higher is currently required to run TensorRT-LLM
2. Install `tensorrt_llm` via pip with `pip3 install tensorrt_llm -U --extra-index-url https://pypi.nvidia.com`
3. For this example we will use Llama2. The Llama2 model files need to be created via scripts following the instructions
(https://github.com/NVIDIA/trt-llm-rag-windows/blob/release/1.0/README.md#building-trt-engine)
* The following files will be created from following the stop above
* `Llama_float16_tp1_rank0.engine`: The main output of the build script, containing the executable graph of operations with the model weights embedded.
* `config.json`: Includes detailed information about the model, like its general structure and precision, as well as information about which plug-ins were incorporated into the engine.
* `model.cache`: Caches some of the timing and optimization information from model compilation, making successive builds quicker.
4. `mkdir model`
5. Move all of the files mentioned above to the model directory.
Examples:
`pip install llama-index-llms-nvidia-tensorrt`
```python
from llama_index.llms.nvidia_tensorrt import LocalTensorRTLLM
def completion_to_prompt(completion):
return f"<s> [INST] {completion} [/INST] "
def messages_to_prompt(messages):
content = ""
for message in messages:
content += str(message) + "\n"
return f"<s> [INST] {content} [/INST] "
llm = LocalTensorRTLLM(
model_path="./model",
engine_name="llama_float16_tp1_rank0.engine",
tokenizer_dir="meta-llama/Llama-2-13b-chat",
completion_to_prompt=completion_to_prompt,
messages_to_prompt=messages_to_prompt,
)
resp = llm.complete("Who is Paul Graham?")
print(str(resp))
```
"""
model_path: Optional[str] = Field(description="The path to the trt engine.")
temperature: float = Field(description="The temperature to use for sampling.")
max_new_tokens: int = Field(description="The maximum number of tokens to generate.")
context_window: int = Field(
description="The maximum number of context tokens for the model."
)
messages_to_prompt: Callable = Field(
description="The function to convert messages to a prompt.", exclude=True
)
completion_to_prompt: Callable = Field(
description="The function to convert a completion to a prompt.", exclude=True
)
generate_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Kwargs used for generation."
)
model_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Kwargs used for model initialization."
)
verbose: bool = Field(description="Whether to print verbose output.")
_model: Any = PrivateAttr()
_model_config: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_max_new_tokens = PrivateAttr()
_sampling_config = PrivateAttr()
_verbose = PrivateAttr()
def __init__(
self,
model_path: Optional[str] = None,
engine_name: Optional[str] = None,
tokenizer_dir: Optional[str] = None,
temperature: float = 0.1,
max_new_tokens: int = DEFAULT_NUM_OUTPUTS,
context_window: int = DEFAULT_CONTEXT_WINDOW,
messages_to_prompt: Optional[Callable] = None,
completion_to_prompt: Optional[Callable] = None,
callback_manager: Optional[CallbackManager] = None,
generate_kwargs: Optional[Dict[str, Any]] = None,
model_kwargs: Optional[Dict[str, Any]] = None,
verbose: bool = False,
) -> None:
try:
import tensorrt_llm
from tensorrt_llm.runtime import ModelConfig, SamplingConfig
except ImportError:
print(
"Unable to import `tensorrt_llm` module. Please ensure you have\
`tensorrt_llm` installed in your environment. You can run\
`pip3 install tensorrt_llm -U --extra-index-url https://pypi.nvidia.com` to install."
)
model_kwargs = model_kwargs or {}
model_kwargs.update({"n_ctx": context_window, "verbose": verbose})
max_new_tokens = max_new_tokens
verbose = verbose
# check if model is cached
if model_path is not None:
if not os.path.exists(model_path):
raise ValueError(
"Provided model path does not exist. "
"Please check the path or provide a model_url to download."
)
else:
engine_dir = model_path
engine_dir_path = Path(engine_dir)
config_path = engine_dir_path / "config.json"
# config function
with open(config_path) as f:
config = json.load(f)
use_gpt_attention_plugin = config["plugin_config"][
"gpt_attention_plugin"
]
remove_input_padding = config["plugin_config"]["remove_input_padding"]
tp_size = config["builder_config"]["tensor_parallel"]
pp_size = 1
if "pipeline_parallel" in config["builder_config"]:
pp_size = config["builder_config"]["pipeline_parallel"]
world_size = tp_size * pp_size
assert world_size == tensorrt_llm.mpi_world_size(), (
f"Engine world size ({world_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})"
)
num_heads = config["builder_config"]["num_heads"] // tp_size
hidden_size = config["builder_config"]["hidden_size"] // tp_size
vocab_size = config["builder_config"]["vocab_size"]
num_layers = config["builder_config"]["num_layers"]
num_kv_heads = config["builder_config"].get("num_kv_heads", num_heads)
paged_kv_cache = config["plugin_config"]["paged_kv_cache"]
if config["builder_config"].get("multi_query_mode", False):
tensorrt_llm.logger.warning(
"`multi_query_mode` config is deprecated. Please rebuild the engine."
)
num_kv_heads = 1
num_kv_heads = (num_kv_heads + tp_size - 1) // tp_size
model_config = ModelConfig(
num_heads=num_heads,
num_kv_heads=num_kv_heads,
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
gpt_attention_plugin=use_gpt_attention_plugin,
paged_kv_cache=paged_kv_cache,
remove_input_padding=remove_input_padding,
max_batch_size=config["builder_config"]["max_batch_size"],
)
assert pp_size == 1, (
"Python runtime does not support pipeline parallelism"
)
world_size = tp_size * pp_size
runtime_rank = tensorrt_llm.mpi_rank()
runtime_mapping = tensorrt_llm.Mapping(
world_size, runtime_rank, tp_size=tp_size, pp_size=pp_size
)
# TensorRT-LLM must run on a GPU.
assert torch.cuda.is_available(), (
"LocalTensorRTLLM requires a Nvidia CUDA enabled GPU to operate"
)
torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir, legacy=False)
sampling_config = SamplingConfig(
end_id=EOS_TOKEN,
pad_id=PAD_TOKEN,
num_beams=1,
temperature=temperature,
)
serialize_path = engine_dir_path / (engine_name if engine_name else "")
with open(serialize_path, "rb") as f:
engine_buffer = f.read()
decoder = tensorrt_llm.runtime.GenerationSession(
model_config, engine_buffer, runtime_mapping, debug_mode=False
)
model = decoder
generate_kwargs = generate_kwargs or {}
generate_kwargs.update(
{"temperature": temperature, "max_tokens": max_new_tokens}
)
super().__init__(
model_path=model_path,
temperature=temperature,
context_window=context_window,
max_new_tokens=max_new_tokens,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
callback_manager=callback_manager,
generate_kwargs=generate_kwargs,
model_kwargs=model_kwargs,
verbose=verbose,
)
self._model = model
self._model_config = model_config
self._tokenizer = tokenizer
self._sampling_config = sampling_config
self._max_new_tokens = max_new_tokens
self._verbose = verbose
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "LocalTensorRTLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_new_tokens,
model_name=self.model_path,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
try:
import torch
except ImportError:
raise ImportError("nvidia_tensorrt requires `pip install torch`.")
self.generate_kwargs.update({"stream": False})
if not formatted:
prompt = self.completion_to_prompt(prompt)
input_text = prompt
input_ids, input_lengths = parse_input(
input_text, self._tokenizer, EOS_TOKEN, self._model_config
)
max_input_length = torch.max(input_lengths).item()
self._model.setup(
input_lengths.size(0), max_input_length, self._max_new_tokens, 1
) # beam size is set to 1
if self._verbose:
start_time = time.time()
output_ids = self._model.decode(input_ids, input_lengths, self._sampling_config)
torch.cuda.synchronize()
elapsed_time = -1.0
if self._verbose:
end_time = time.time()
elapsed_time = end_time - start_time
output_txt, output_token_ids = get_output(
output_ids, input_lengths, self._max_new_tokens, self._tokenizer
)
if self._verbose:
print(f"Input context length : {input_ids.shape[1]}")
print(f"Inference time : {elapsed_time:.2f} seconds")
print(f"Output context length : {len(output_token_ids)} ")
print(
f"Inference token/sec : {(len(output_token_ids) / elapsed_time):2f}"
)
# call garbage collected after inference
torch.cuda.empty_cache()
gc.collect()
return CompletionResponse(
text=output_txt,
raw=generate_completion_dict(output_txt, self._model, self.model_path),
)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
raise NotImplementedError(
"Nvidia TensorRT-LLM does not currently support streaming completion."
)
| LocalTensorRTLLM |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_asset_events.py | {
"start": 16144,
"end": 18603
} | class ____:
@pytest.mark.usefixtures("test_asset_alias")
def test_get_by_asset(self, client):
response = client.get(
"/execution/asset-events/by-asset-alias",
params={"name": "test_alias"},
)
assert response.status_code == 200
assert response.json() == {
"asset_events": [
{
"id": 1,
"extra": {"foo": "bar"},
"source_task_id": "bar",
"source_dag_id": "foo",
"source_run_id": "custom",
"source_map_index": -1,
"asset": {
"extra": {"foo": "bar"},
"group": "asset",
"name": "test_get_asset_by_name",
"uri": "s3://bucket/key",
},
"created_dagruns": [],
"timestamp": "2021-01-01T00:00:00Z",
"partition_key": None,
},
{
"id": 2,
"extra": {"foo": "bar"},
"source_task_id": "bar",
"source_dag_id": "foo",
"source_run_id": "custom",
"source_map_index": -1,
"asset": {
"extra": {"foo": "bar"},
"group": "asset",
"name": "test_get_asset_by_name",
"uri": "s3://bucket/key",
},
"created_dagruns": [],
"timestamp": "2021-01-02T00:00:00Z",
"partition_key": None,
},
{
"id": 3,
"extra": {"foo": "bar"},
"source_task_id": "bar",
"source_dag_id": "foo",
"source_run_id": "custom",
"source_map_index": -1,
"asset": {
"extra": {"foo": "bar"},
"group": "asset",
"name": "test_get_asset_by_name",
"uri": "s3://bucket/key",
},
"created_dagruns": [],
"timestamp": "2021-01-03T00:00:00Z",
"partition_key": None,
},
]
}
| TestGetAssetEventByAssetAlias |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.