language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__jax | tests/lru_cache_test.py | {
"start": 1463,
"end": 4900
} | class ____(LRUCacheTestCase):
def test_get_nonexistent_key(self):
cache = LRUCache(self.name, max_size=-1)
self.assertIsNone(cache.get("a"))
def test_put_and_get_key(self):
cache = LRUCache(self.name, max_size=-1)
cache.put("a", b"a")
self.assertEqual(cache.get("a"), b"a")
self.assertCacheKeys(("a",))
cache.put("b", b"b")
self.assertEqual(cache.get("a"), b"a")
self.assertEqual(cache.get("b"), b"b")
self.assertCacheKeys(("a", "b"))
def test_put_empty_value(self):
cache = LRUCache(self.name, max_size=-1)
cache.put("a", b"")
self.assertEqual(cache.get("a"), b"")
def test_put_empty_key(self):
cache = LRUCache(self.name, max_size=-1)
with self.assertRaisesRegex(ValueError, r"key cannot be empty"):
cache.put("", b"a")
def test_eviction(self):
cache = LRUCache(self.name, max_size=2)
cache.put("a", b"a")
cache.put("b", b"b")
# `sleep()` is necessary to guarantee that `b`'s timestamp is strictly greater than `a`'s
time.sleep(1)
cache.get("b")
# write `c`. `a` should be evicted
cache.put("c", b"c")
self.assertCacheKeys(("b", "c"))
# calling `get()` on `b` makes `c` least recently used
time.sleep(1)
cache.get("b")
# write `d`. `c` should be evicted
cache.put("d", b"d")
self.assertCacheKeys(("b", "d"))
def test_eviction_with_empty_value(self):
cache = LRUCache(self.name, max_size=1)
cache.put("a", b"a")
# write `b` with length 0
# eviction should not happen even though the cache is full
cache.put("b", b"")
self.assertCacheKeys(("a", "b"))
# calling `get()` on `a` makes `b` least recently used
time.sleep(1)
cache.get("a")
# writing `c` should result in evicting the
# least recent used file (`b`) first,
# but this is not sufficient to make room for `c`,
# so `a` should be evicted as well
cache.put("c", b"c")
self.assertCacheKeys(("c",))
def test_existing_cache_dir(self):
cache = LRUCache(self.name, max_size=2)
cache.put("a", b"a")
# simulates reinitializing the cache in another process
del cache
cache = LRUCache(self.name, max_size=2)
self.assertEqual(cache.get("a"), b"a")
# ensure that the LRU policy survives cache reinitialization
cache.put("b", b"b")
# calling `get()` on `a` makes `b` least recently used
time.sleep(1)
cache.get("a")
# write `c`. `b` should be evicted
cache.put("c", b"c")
self.assertCacheKeys(("a", "c"))
def test_max_size(self):
cache = LRUCache(self.name, max_size=1)
msg = (r"Cache value for key .+? of size \d+ bytes exceeds the maximum "
r"cache size of \d+ bytes")
with self.assertWarnsRegex(UserWarning, msg):
cache.put("a", b"aaaa")
self.assertIsNone(cache.get("a"))
self.assertEqual(set(self.path.glob(f"*{_CACHE_SUFFIX}")), set())
# Check that we don't write access time file when the eviction policy is
# disabled. Writing this file can be extremely unperformant and cause
# problems on large-scale network storage.
def test_no_atime_file(self):
cache = LRUCache(self.name, max_size=-1)
cache.put("a", b"a")
self.assertEmpty(list(self.path.glob(f"*{_ATIME_SUFFIX}")))
cache.get("a")
self.assertEmpty(list(self.path.glob(f"*{_ATIME_SUFFIX}")))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LRUCacheTest |
python | graphql-python__graphene | graphene/types/inputobjecttype.py | {
"start": 2330,
"end": 4715
} | class ____(UnmountedType, BaseType):
"""
Input Object Type Definition
An input object defines a structured collection of fields which may be
supplied to a field argument.
Using ``graphene.NonNull`` will ensure that a input value must be provided by the query.
All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField
using the below Meta class options.
.. code:: python
from graphene import InputObjectType, String, InputField
class Person(InputObjectType):
# implicitly mounted as Input Field
first_name = String(required=True)
# explicitly mounted as Input Field
last_name = InputField(String, description="Surname")
The fields on an input object type can themselves refer to input object types, but you can't
mix input and output types in your schema.
Meta class options (optional):
name (str): the name of the GraphQL type (must be unique in schema). Defaults to class
name.
description (str): the description of the GraphQL type in the schema. Defaults to class
docstring.
container (class): A class reference for a value object that allows for
attribute initialization and access. Default InputObjectTypeContainer.
fields (Dict[str, graphene.InputField]): Dictionary of field name to InputField. Not
recommended to use (prefer class attributes).
"""
@classmethod
def __init_subclass_with_meta__(cls, container=None, _meta=None, **options):
if not _meta:
_meta = InputObjectTypeOptions(cls)
fields = {}
for base in reversed(cls.__mro__):
fields.update(yank_fields_from_attrs(base.__dict__, _as=InputField))
if _meta.fields:
_meta.fields.update(fields)
else:
_meta.fields = fields
if container is None:
container = type(cls.__name__, (InputObjectTypeContainer, cls), {})
_meta.container = container
super(InputObjectType, cls).__init_subclass_with_meta__(_meta=_meta, **options)
@classmethod
def get_type(cls):
"""
This function is called when the unmounted type (InputObjectType instance)
is mounted (as a Field, InputField or Argument)
"""
return cls
| InputObjectType |
python | pytorch__pytorch | torch/distributed/debug/_frontend.py | {
"start": 6694,
"end": 7250
} | class ____(BaseHTTPRequestHandler):
frontend: "FrontendServer"
def do_GET(self):
self.frontend._handle_request(self)
def get_path(self) -> str:
return urlparse(self.path).path
def get_query(self) -> dict[str, list[str]]:
return parse_qs(urlparse(self.path).query)
def get_query_arg(
self, name: str, default: object = None, type: type = str
) -> object:
query = self.get_query()
if name not in query:
return default
return type(query[name][0])
| HTTPRequestHandler |
python | jina-ai__jina | jina/excepts.py | {
"start": 622,
"end": 761
} | class ____(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
| FlowMissingDeploymentError |
python | Netflix__metaflow | metaflow/plugins/kubernetes/kubernetes_decorator.py | {
"start": 1546,
"end": 32438
} | class ____(StepDecorator):
"""
Specifies that this step should execute on Kubernetes.
Parameters
----------
cpu : int, default 1
Number of CPUs required for this step. If `@resources` is
also present, the maximum value from all decorators is used.
memory : int, default 4096
Memory size (in MB) required for this step. If
`@resources` is also present, the maximum value from all decorators is
used.
disk : int, default 10240
Disk size (in MB) required for this step. If
`@resources` is also present, the maximum value from all decorators is
used.
image : str, optional, default None
Docker image to use when launching on Kubernetes. If not specified, and
METAFLOW_KUBERNETES_CONTAINER_IMAGE is specified, that image is used. If
not, a default Docker image mapping to the current version of Python is used.
image_pull_policy: str, default KUBERNETES_IMAGE_PULL_POLICY
If given, the imagePullPolicy to be applied to the Docker image of the step.
image_pull_secrets: List[str], default []
The default is extracted from METAFLOW_KUBERNETES_IMAGE_PULL_SECRETS.
Kubernetes image pull secrets to use when pulling container images
in Kubernetes.
service_account : str, default METAFLOW_KUBERNETES_SERVICE_ACCOUNT
Kubernetes service account to use when launching pod in Kubernetes.
secrets : List[str], optional, default None
Kubernetes secrets to use when launching pod in Kubernetes. These
secrets are in addition to the ones defined in `METAFLOW_KUBERNETES_SECRETS`
in Metaflow configuration.
node_selector: Union[Dict[str,str], str], optional, default None
Kubernetes node selector(s) to apply to the pod running the task.
Can be passed in as a comma separated string of values e.g.
'kubernetes.io/os=linux,kubernetes.io/arch=amd64' or as a dictionary
{'kubernetes.io/os': 'linux', 'kubernetes.io/arch': 'amd64'}
namespace : str, default METAFLOW_KUBERNETES_NAMESPACE
Kubernetes namespace to use when launching pod in Kubernetes.
gpu : int, optional, default None
Number of GPUs required for this step. A value of zero implies that
the scheduled node should not have GPUs.
gpu_vendor : str, default KUBERNETES_GPU_VENDOR
The vendor of the GPUs to be used for this step.
tolerations : List[Dict[str,str]], default []
The default is extracted from METAFLOW_KUBERNETES_TOLERATIONS.
Kubernetes tolerations to use when launching pod in Kubernetes.
labels: Dict[str, str], default: METAFLOW_KUBERNETES_LABELS
Kubernetes labels to use when launching pod in Kubernetes.
annotations: Dict[str, str], default: METAFLOW_KUBERNETES_ANNOTATIONS
Kubernetes annotations to use when launching pod in Kubernetes.
use_tmpfs : bool, default False
This enables an explicit tmpfs mount for this step.
tmpfs_tempdir : bool, default True
sets METAFLOW_TEMPDIR to tmpfs_path if set for this step.
tmpfs_size : int, optional, default: None
The value for the size (in MiB) of the tmpfs mount for this step.
This parameter maps to the `--tmpfs` option in Docker. Defaults to 50% of the
memory allocated for this step.
tmpfs_path : str, optional, default /metaflow_temp
Path to tmpfs mount for this step.
persistent_volume_claims : Dict[str, str], optional, default None
A map (dictionary) of persistent volumes to be mounted to the pod for this step. The map is from persistent
volumes to the path to which the volume is to be mounted, e.g., `{'pvc-name': '/path/to/mount/on'}`.
shared_memory: int, optional
Shared memory size (in MiB) required for this step
port: int, optional
Port number to specify in the Kubernetes job object
compute_pool : str, optional, default None
Compute pool to be used for for this step.
If not specified, any accessible compute pool within the perimeter is used.
hostname_resolution_timeout: int, default 10 * 60
Timeout in seconds for the workers tasks in the gang scheduled cluster to resolve the hostname of control task.
Only applicable when @parallel is used.
qos: str, default: Burstable
Quality of Service class to assign to the pod. Supported values are: Guaranteed, Burstable, BestEffort
security_context: Dict[str, Any], optional, default None
Container security context. Applies to the task container. Allows the following keys:
- privileged: bool, optional, default None
- allow_privilege_escalation: bool, optional, default None
- run_as_user: int, optional, default None
- run_as_group: int, optional, default None
- run_as_non_root: bool, optional, default None
"""
name = "kubernetes"
defaults = {
"cpu": "1",
"memory": "4096",
"disk": "10240",
"image": None,
"image_pull_policy": None,
"image_pull_secrets": None, # e.g., ["regcred"]
"service_account": None,
"secrets": None, # e.g., mysecret
"node_selector": None, # e.g., kubernetes.io/os=linux
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
"tolerations": None, # e.g., [{"key": "arch", "operator": "Equal", "value": "amd"},
# {"key": "foo", "operator": "Equal", "value": "bar"}]
"labels": None, # e.g. {"test-label": "value", "another-label":"value2"}
"annotations": None, # e.g. {"note": "value", "another-note": "value2"}
"use_tmpfs": None,
"tmpfs_tempdir": True,
"tmpfs_size": None,
"tmpfs_path": "/metaflow_temp",
"persistent_volume_claims": None, # e.g., {"pvc-name": "/mnt/vol", "another-pvc": "/mnt/vol2"}
"shared_memory": None,
"port": None,
"compute_pool": None,
"executable": None,
"hostname_resolution_timeout": 10 * 60,
"qos": KUBERNETES_QOS,
"security_context": None,
}
package_metadata = None
package_url = None
package_sha = None
run_time_limit = None
# Conda environment support
supports_conda_environment = True
target_platform = KUBERNETES_CONDA_ARCH or "linux-64"
def init(self):
if not self.attributes["namespace"]:
self.attributes["namespace"] = KUBERNETES_NAMESPACE
if not self.attributes["service_account"]:
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
if not self.attributes["node_selector"] and KUBERNETES_NODE_SELECTOR:
self.attributes["node_selector"] = KUBERNETES_NODE_SELECTOR
if not self.attributes["tolerations"] and KUBERNETES_TOLERATIONS:
self.attributes["tolerations"] = json.loads(KUBERNETES_TOLERATIONS)
if (
not self.attributes["persistent_volume_claims"]
and KUBERNETES_PERSISTENT_VOLUME_CLAIMS
):
self.attributes["persistent_volume_claims"] = json.loads(
KUBERNETES_PERSISTENT_VOLUME_CLAIMS
)
if not self.attributes["image_pull_policy"] and KUBERNETES_IMAGE_PULL_POLICY:
self.attributes["image_pull_policy"] = KUBERNETES_IMAGE_PULL_POLICY
if not self.attributes["image_pull_secrets"] and KUBERNETES_IMAGE_PULL_SECRETS:
self.attributes["image_pull_secrets"] = json.loads(
KUBERNETES_IMAGE_PULL_SECRETS
)
if isinstance(self.attributes["node_selector"], str):
self.attributes["node_selector"] = parse_kube_keyvalue_list(
self.attributes["node_selector"].split(",")
)
if self.attributes["compute_pool"]:
if self.attributes["node_selector"] is None:
self.attributes["node_selector"] = {}
self.attributes["node_selector"].update(
{"outerbounds.co/compute-pool": self.attributes["compute_pool"]}
)
if self.attributes["tolerations"]:
try:
from kubernetes.client import V1Toleration
for toleration in self.attributes["tolerations"]:
try:
invalid_keys = [
k
for k in toleration.keys()
if k not in V1Toleration.attribute_map.keys()
]
if len(invalid_keys) > 0:
raise KubernetesException(
"Tolerations parameter contains invalid keys: %s"
% invalid_keys
)
except AttributeError:
raise KubernetesException(
"Unable to parse tolerations: %s"
% self.attributes["tolerations"]
)
except (NameError, ImportError):
pass
# parse the CPU, memory, disk, values from the KUBERNETES_ environment variable (you would need to export the METAFLOW_KUBERNETES_CPU, METAFLOW_KUBERNETES_MEMORY and/or METAFLOW_KUBERNTES_DISK environment variable with the desired values before running the flow)
# find the values from the environment variables, then validate if the values are still the default ones, if so, then replace them with the values from the environment variables (otherwise, keep the values from the decorator)
if self.attributes["cpu"] == self.defaults["cpu"] and KUBERNETES_CPU:
self.attributes["cpu"] = KUBERNETES_CPU
if self.attributes["memory"] == self.defaults["memory"] and KUBERNETES_MEMORY:
self.attributes["memory"] = KUBERNETES_MEMORY
if self.attributes["disk"] == self.defaults["disk"] and KUBERNETES_DISK:
self.attributes["disk"] = KUBERNETES_DISK
# Label source precedence (decreasing):
# - System labels (set outside of decorator)
# - Decorator labels: @kubernetes(labels={})
# - Environment variable labels: METAFLOW_KUBERNETES_LABELS=
deco_labels = {}
if self.attributes["labels"] is not None:
deco_labels = self.attributes["labels"]
env_labels = {}
if KUBERNETES_LABELS:
env_labels = parse_kube_keyvalue_list(KUBERNETES_LABELS.split(","), False)
self.attributes["labels"] = {**env_labels, **deco_labels}
# Annotations
# annotation precedence (decreasing):
# - System annotations (set outside of decorator)
# - Decorator annotations: @kubernetes(annotations={})
# - Environment annotations: METAFLOW_KUBERNETES_ANNOTATIONS=
deco_annotations = {}
if self.attributes["annotations"] is not None:
deco_annotations = self.attributes["annotations"]
env_annotations = {}
if KUBERNETES_ANNOTATIONS:
env_annotations = parse_kube_keyvalue_list(
KUBERNETES_ANNOTATIONS.split(","), False
)
self.attributes["annotations"] = {**env_annotations, **deco_annotations}
# If no docker image is explicitly specified, impute a default image.
if not self.attributes["image"]:
# If metaflow-config specifies a docker image, just use that.
if KUBERNETES_CONTAINER_IMAGE:
self.attributes["image"] = KUBERNETES_CONTAINER_IMAGE
# If metaflow-config doesn't specify a docker image, assign a
# default docker image.
else:
# Default to vanilla Python image corresponding to major.minor
# version of the Python interpreter launching the flow.
self.attributes["image"] = "python:%s.%s" % (
platform.python_version_tuple()[0],
platform.python_version_tuple()[1],
)
# Assign docker registry URL for the image.
if not get_docker_registry(self.attributes["image"]):
if KUBERNETES_CONTAINER_REGISTRY:
self.attributes["image"] = "%s/%s" % (
KUBERNETES_CONTAINER_REGISTRY.rstrip("/"),
self.attributes["image"],
)
# Check if TmpFS is enabled and set default tmpfs_size if missing.
if self.attributes["use_tmpfs"] or (
self.attributes["tmpfs_size"] and not self.attributes["use_tmpfs"]
):
if not self.attributes["tmpfs_size"]:
# default tmpfs behavior - https://man7.org/linux/man-pages/man5/tmpfs.5.html
self.attributes["tmpfs_size"] = int(self.attributes["memory"]) // 2
if not self.attributes["shared_memory"]:
self.attributes["shared_memory"] = KUBERNETES_SHARED_MEMORY
if not self.attributes["port"]:
self.attributes["port"] = KUBERNETES_PORT
# Refer https://github.com/Netflix/metaflow/blob/master/docs/lifecycle.png
def step_init(self, flow, graph, step, decos, environment, flow_datastore, logger):
# Executing Kubernetes jobs requires a non-local datastore.
if flow_datastore.TYPE not in ("s3", "azure", "gs"):
raise KubernetesException(
"The *@kubernetes* decorator requires --datastore=s3 or --datastore=azure or --datastore=gs at the moment."
)
# Set internal state.
self.logger = logger
self.environment = environment
self.step = step
self.flow_datastore = flow_datastore
if (
self.attributes["qos"] is not None
# case insensitive matching.
and self.attributes["qos"].lower()
not in [c.lower() for c in SUPPORTED_KUBERNETES_QOS_CLASSES]
):
raise MetaflowException(
"*%s* is not a valid Kubernetes QoS class. Choose one of the following: %s"
% (self.attributes["qos"], ", ".join(SUPPORTED_KUBERNETES_QOS_CLASSES))
)
if any([deco.name == "batch" for deco in decos]):
raise MetaflowException(
"Step *{step}* is marked for execution both on AWS Batch and "
"Kubernetes. Please use one or the other.".format(step=step)
)
if any([deco.name == "parallel" for deco in decos]) and any(
[deco.name == "catch" for deco in decos]
):
raise MetaflowException(
"Step *{step}* contains a @parallel decorator "
"with the @catch decorator. @catch is not supported with @parallel on Kubernetes.".format(
step=step
)
)
# Set run time limit for the Kubernetes job.
self.run_time_limit = get_run_time_limit_for_task(decos)
if self.run_time_limit < 60:
raise KubernetesException(
"The timeout for step *{step}* should be at least 60 seconds for "
"execution on Kubernetes.".format(step=step)
)
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# If GPU count is specified, explicitly set it in self.attributes.
if k == "gpu" and v != None:
self.attributes["gpu"] = v
if k in self.attributes:
if self.defaults[k] is None:
# skip if expected value isn't an int/float
continue
# We use the larger of @resources and @batch attributes
# TODO: Fix https://github.com/Netflix/metaflow/issues/467
my_val = self.attributes.get(k)
if not (my_val is None and v is None):
self.attributes[k] = str(
max(float(my_val or 0), float(v or 0))
)
# Check GPU vendor.
if self.attributes["gpu_vendor"].lower() not in ("amd", "nvidia"):
raise KubernetesException(
"GPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["gpu_vendor"], step=step
)
)
# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
isinstance(self.attributes[attr], (int, unicode, basestring, float))
and float(self.attributes[attr]) > 0
):
raise KubernetesException(
"Invalid {} value *{}* for step *{step}*; it should be greater than 0".format(
attr, self.attributes[attr], step=step
)
)
if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
)
)
if self.attributes["tmpfs_size"]:
if not (
isinstance(self.attributes["tmpfs_size"], (int, unicode, basestring))
and int(self.attributes["tmpfs_size"]) > 0
):
raise KubernetesException(
"Invalid tmpfs_size value: *{size}* for step *{step}* (should be an integer greater than 0)".format(
size=self.attributes["tmpfs_size"], step=step
)
)
if self.attributes["shared_memory"]:
if not (
isinstance(self.attributes["shared_memory"], int)
and int(self.attributes["shared_memory"]) > 0
):
raise KubernetesException(
"Invalid shared_memory value: *{size}* for step *{step}* (should be an integer greater than 0)".format(
size=self.attributes["shared_memory"], step=step
)
)
validate_kube_labels(self.attributes["labels"])
# TODO: add validation to annotations as well?
def package_init(self, flow, step_name, environment):
try:
# Kubernetes is a soft dependency.
from kubernetes import client, config
except (NameError, ImportError):
raise KubernetesException(
"Could not import module 'kubernetes'.\n\nInstall Kubernetes "
"Python package (https://pypi.org/project/kubernetes/) first.\n"
"You can install the module by executing - "
"%s -m pip install kubernetes\n"
"or equivalent through your favorite Python package manager."
% sys.executable
)
def runtime_init(self, flow, graph, package, run_id):
# Set some more internal state.
self.flow = flow
self.graph = graph
self.package = package
self.run_id = run_id
def runtime_task_created(
self, task_datastore, task_id, split_index, input_paths, is_cloned, ubf_context
):
# To execute the Kubernetes job, the job container needs to have
# access to the code package. We store the package in the datastore
# which the pod is able to download as part of it's entrypoint.
if not is_cloned:
self._save_package_once(self.flow_datastore, self.package)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if retry_count <= max_user_code_retries:
# After all attempts to run the user code have failed, we don't need
# to execute on Kubernetes anymore. We can execute possible fallback
# code locally.
cli_args.commands = ["kubernetes", "step"]
cli_args.command_args.append(self.package_metadata)
cli_args.command_args.append(self.package_sha)
cli_args.command_args.append(self.package_url)
# skip certain keys as CLI arguments
_skip_keys = ["compute_pool", "hostname_resolution_timeout"]
# --namespace is used to specify Metaflow namespace (a different
# concept from k8s namespace).
for k, v in self.attributes.items():
if k in _skip_keys:
continue
if k == "namespace":
cli_args.command_options["k8s_namespace"] = v
elif k in {"node_selector"} and v:
cli_args.command_options[k] = [
"=".join([key, str(val)]) if val else key
for key, val in v.items()
]
elif k in [
"image_pull_secrets",
"tolerations",
"persistent_volume_claims",
"labels",
"annotations",
"security_context",
]:
cli_args.command_options[k] = json.dumps(v)
else:
cli_args.command_options[k] = v
cli_args.command_options["run-time-limit"] = self.run_time_limit
cli_args.entrypoint[0] = sys.executable
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_retries,
ubf_context,
inputs,
):
self.metadata = metadata
self.task_datastore = task_datastore
# current.tempdir reflects the value of METAFLOW_TEMPDIR (the current working
# directory by default), or the value of tmpfs_path if tmpfs_tempdir=False.
if not self.attributes["tmpfs_tempdir"]:
current._update_env({"tempdir": self.attributes["tmpfs_path"]})
# task_pre_step may run locally if fallback is activated for @catch
# decorator. In that scenario, we skip collecting Kubernetes execution
# metadata. A rudimentary way to detect non-local execution is to
# check for the existence of METAFLOW_KUBERNETES_WORKLOAD environment
# variable.
meta = {}
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
meta["kubernetes-pod-name"] = os.environ["METAFLOW_KUBERNETES_POD_NAME"]
meta["kubernetes-pod-namespace"] = os.environ[
"METAFLOW_KUBERNETES_POD_NAMESPACE"
]
meta["kubernetes-pod-id"] = os.environ["METAFLOW_KUBERNETES_POD_ID"]
meta["kubernetes-pod-service-account-name"] = os.environ[
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME"
]
meta["kubernetes-node-ip"] = os.environ["METAFLOW_KUBERNETES_NODE_IP"]
meta["kubernetes-jobset-name"] = os.environ.get(
"METAFLOW_KUBERNETES_JOBSET_NAME"
)
# TODO (savin): Introduce equivalent support for Microsoft Azure and
# Google Cloud Platform
# TODO: Introduce a way to detect Cloud Provider, so unnecessary requests
# (and delays) can be avoided by not having to try out all providers.
if KUBERNETES_FETCH_EC2_METADATA:
instance_meta = get_ec2_instance_metadata()
meta.update(instance_meta)
# Unfortunately, there doesn't seem to be any straight forward way right
# now to attach the Batch/v1 name - While we can rely on a hacky approach
# given we know that the pod name is simply a unique suffix with a hyphen
# delimiter to the Batch/v1 name - this approach will fail if the Batch/v1
# name is closer to 63 chars where the pod name will truncate the Batch/v1
# name.
# if "ARGO_WORKFLOW_NAME" not in os.environ:
# meta["kubernetes-job-name"] = os.environ[
# "METAFLOW_KUBERNETES_POD_NAME"
# ].rpartition("-")[0]
# Start MFLog sidecar to collect task logs.
self._save_logs_sidecar = Sidecar("save_logs_periodically")
self._save_logs_sidecar.start()
# Start spot termination monitor sidecar.
current._update_env(
{"spot_termination_notice": "/tmp/spot_termination_notice"}
)
self._spot_monitor_sidecar = Sidecar("spot_termination_monitor")
self._spot_monitor_sidecar.start()
num_parallel = None
if hasattr(flow, "_parallel_ubf_iter"):
num_parallel = flow._parallel_ubf_iter.num_parallel
if num_parallel and num_parallel > 1:
_setup_multinode_environment(
ubf_context, self.attributes["hostname_resolution_timeout"]
)
# current.parallel.node_index will be correctly available over here.
meta.update({"parallel-node-index": current.parallel.node_index})
if ubf_context == UBF_CONTROL:
flow._control_mapper_tasks = [
"{}/{}/{}".format(run_id, step_name, task_id)
for task_id in [task_id]
+ [
"%s-worker-%d" % (task_id, idx)
for idx in range(num_parallel - 1)
]
]
flow._control_task_is_mapper_zero = True
if len(meta) > 0:
entries = [
MetaDatum(
field=k,
value=v,
type=k,
tags=["attempt_id:{0}".format(retry_count)],
)
for k, v in meta.items()
if v is not None
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_retries
):
# task_finished may run locally if fallback is activated for @catch
# decorator.
if "METAFLOW_KUBERNETES_WORKLOAD" in os.environ:
# If `local` metadata is configured, we would need to copy task
# execution metadata from the AWS Batch container to user's
# local file system after the user code has finished execution.
# This happens via datastore as a communication bridge.
# TODO: There is no guarantee that task_pre_step executes before
# task_finished is invoked.
# For now we guard against the missing metadata object in this case.
if hasattr(self, "metadata") and self.metadata.TYPE == "local":
# Note that the datastore is *always* Amazon S3 (see
# runtime_task_created function).
sync_local_metadata_to_datastore(
DATASTORE_LOCAL_DIR, self.task_datastore
)
try:
self._save_logs_sidecar.terminate()
self._spot_monitor_sidecar.terminate()
except:
# Best effort kill
pass
@classmethod
def _save_package_once(cls, flow_datastore, package):
if cls.package_url is None:
if not FEAT_ALWAYS_UPLOAD_CODE_PACKAGE:
cls.package_url, cls.package_sha = flow_datastore.save_data(
[package.blob], len_hint=1
)[0]
cls.package_metadata = package.package_metadata
else:
# Blocks until the package is uploaded
cls.package_url = package.package_url()
cls.package_sha = package.package_sha()
cls.package_metadata = package.package_metadata
# TODO: Unify this method with the multi-node setup in @batch
def _setup_multinode_environment(ubf_context, hostname_resolution_timeout):
import socket
def _wait_for_hostname_resolution(max_wait_timeout=10 * 60):
"""
keep trying to resolve the hostname of the control task until the hostname is resolved
or the max_wait_timeout is reached. This is a workaround for the issue where the control
task is not scheduled before the worker task and the worker task fails because it cannot
resolve the hostname of the control task.
"""
start_time = time.time()
while True:
try:
return socket.gethostbyname(os.environ["MF_MASTER_ADDR"])
except socket.gaierror:
if time.time() - start_time > max_wait_timeout:
raise MetaflowException(
"Failed to get host by name for MF_MASTER_ADDR after waiting for {} seconds.".format(
max_wait_timeout
)
)
time.sleep(1)
try:
# Even if Kubernetes may deploy control pods before worker pods, there is always a
# possibility that the worker pods may start before the control. In the case that this happens,
# the worker pods will not be able to resolve the control pod's IP address and this will cause
# the worker pods to fail. So if the worker pods are requesting a hostname resolution, we will
# make it wait for the name to be resolved within a reasonable timeout period.
if ubf_context != UBF_CONTROL:
os.environ["MF_PARALLEL_MAIN_IP"] = _wait_for_hostname_resolution(
hostname_resolution_timeout
)
else:
os.environ["MF_PARALLEL_MAIN_IP"] = socket.gethostbyname(
os.environ["MF_MASTER_ADDR"]
)
os.environ["MF_PARALLEL_NUM_NODES"] = os.environ["MF_WORLD_SIZE"]
os.environ["MF_PARALLEL_NODE_INDEX"] = (
str(0)
if "MF_CONTROL_INDEX" in os.environ
else str(int(os.environ["MF_WORKER_REPLICA_INDEX"]) + 1)
)
except KeyError as e:
raise MetaflowException("Environment variable {} is missing.".format(e))
except socket.gaierror:
raise MetaflowException("Failed to get host by name for MF_MASTER_ADDR.")
except ValueError:
raise MetaflowException("Invalid value for MF_WORKER_REPLICA_INDEX.")
| KubernetesDecorator |
python | pandas-dev__pandas | asv_bench/benchmarks/stat_ops.py | {
"start": 2042,
"end": 2320
} | class ____:
params = [ops, ["float", "int"]]
param_names = ["op", "dtype"]
def setup(self, op, dtype):
s = pd.Series(np.random.randn(100000)).astype(dtype)
self.s_func = getattr(s, op)
def time_op(self, op, dtype):
self.s_func()
| SeriesOps |
python | ray-project__ray | python/ray/dashboard/modules/job/common.py | {
"start": 8910,
"end": 17216
} | class ____:
"""
Interface to put and get job data from the Internal KV store.
"""
# Please keep this format in sync with JobDataKey()
# in src/ray/gcs/gcs_server/gcs_job_manager.h.
JOB_DATA_KEY_PREFIX = f"{RAY_INTERNAL_NAMESPACE_PREFIX}job_info_"
JOB_DATA_KEY = f"{JOB_DATA_KEY_PREFIX}{{job_id}}"
def __init__(
self,
gcs_client: GcsClient,
export_event_log_dir_root: Optional[str] = None,
):
"""
Initialize the JobInfoStorageClient which manages data in the internal KV store.
Export Submission Job events are written when the KV store is updated if
the feature flag is on and a export_event_log_dir_root is passed.
export_event_log_dir_root doesn't need to be passed if the caller
is not modifying data in the KV store.
"""
self._gcs_client = gcs_client
self._export_submission_job_event_logger: logging.Logger = None
try:
if (
check_export_api_enabled(ExportEvent.SourceType.EXPORT_SUBMISSION_JOB)
and export_event_log_dir_root is not None
):
self._export_submission_job_event_logger = get_export_event_logger(
EventLogType.SUBMISSION_JOB,
export_event_log_dir_root,
)
except Exception:
logger.exception(
"Unable to initialize export event logger so no export "
"events will be written."
)
async def put_info(
self,
job_id: str,
job_info: JobInfo,
overwrite: bool = True,
timeout: Optional[int] = 30,
) -> bool:
"""Put job info to the internal kv store.
Args:
job_id: The job id.
job_info: The job info.
overwrite: Whether to overwrite the existing job info.
timeout: The timeout in seconds for the GCS operation.
Returns:
True if a new key is added.
"""
added_num = await self._gcs_client.async_internal_kv_put(
self.JOB_DATA_KEY.format(job_id=job_id).encode(),
json.dumps(job_info.to_json()).encode(),
overwrite,
namespace=ray_constants.KV_NAMESPACE_JOB,
timeout=timeout,
)
if added_num == 1 or overwrite:
# Write export event if data was updated in the KV store
try:
self._write_submission_job_export_event(job_id, job_info)
except Exception:
logger.exception("Error while writing job submission export event.")
return added_num == 1
def _write_submission_job_export_event(
self, job_id: str, job_info: JobInfo
) -> None:
"""
Write Submission Job export event if _export_submission_job_event_logger
exists. The logger will exist if the export API feature flag is enabled
and a log directory was passed to JobInfoStorageClient.
"""
if not self._export_submission_job_event_logger:
return
status_value_descriptor = (
ExportSubmissionJobEventData.JobStatus.DESCRIPTOR.values_by_name.get(
job_info.status.name
)
)
if status_value_descriptor is None:
logger.error(
f"{job_info.status.name} is not a valid "
"ExportSubmissionJobEventData.JobStatus enum value. This event "
"will not be written."
)
return
job_status = status_value_descriptor.number
submission_event_data = ExportSubmissionJobEventData(
submission_job_id=job_id,
status=job_status,
entrypoint=job_info.entrypoint,
message=job_info.message,
metadata=job_info.metadata,
error_type=job_info.error_type,
start_time=job_info.start_time,
end_time=job_info.end_time,
runtime_env_json=json.dumps(job_info.runtime_env),
driver_agent_http_address=job_info.driver_agent_http_address,
driver_node_id=job_info.driver_node_id,
driver_exit_code=job_info.driver_exit_code,
)
self._export_submission_job_event_logger.send_event(submission_event_data)
async def get_info(self, job_id: str, timeout: int = 30) -> Optional[JobInfo]:
serialized_info = await self._gcs_client.async_internal_kv_get(
self.JOB_DATA_KEY.format(job_id=job_id).encode(),
namespace=ray_constants.KV_NAMESPACE_JOB,
timeout=timeout,
)
if serialized_info is None:
return None
else:
return JobInfo.from_json(json.loads(serialized_info))
async def delete_info(self, job_id: str, timeout: int = 30):
await self._gcs_client.async_internal_kv_del(
self.JOB_DATA_KEY.format(job_id=job_id).encode(),
False,
namespace=ray_constants.KV_NAMESPACE_JOB,
timeout=timeout,
)
async def put_status(
self,
job_id: str,
status: JobStatus,
message: Optional[str] = None,
driver_exit_code: Optional[int] = None,
error_type: Optional[JobErrorType] = None,
jobinfo_replace_kwargs: Optional[Dict[str, Any]] = None,
timeout: Optional[int] = 30,
):
"""Puts or updates job status. Sets end_time if status is terminal."""
old_info = await self.get_info(job_id, timeout=timeout)
if jobinfo_replace_kwargs is None:
jobinfo_replace_kwargs = dict()
jobinfo_replace_kwargs.update(
status=status,
message=message,
driver_exit_code=driver_exit_code,
error_type=error_type,
)
if old_info is not None:
if status != old_info.status and old_info.status.is_terminal():
assert False, "Attempted to change job status from a terminal state."
new_info = replace(old_info, **jobinfo_replace_kwargs)
else:
new_info = JobInfo(
entrypoint="Entrypoint not found.", **jobinfo_replace_kwargs
)
if status.is_terminal():
new_info.end_time = int(time.time() * 1000)
await self.put_info(job_id, new_info, timeout=timeout)
async def get_status(self, job_id: str, timeout: int = 30) -> Optional[JobStatus]:
job_info = await self.get_info(job_id, timeout)
if job_info is None:
return None
else:
return job_info.status
async def get_all_jobs(self, timeout: int = 30) -> Dict[str, JobInfo]:
raw_job_ids_with_prefixes = await self._gcs_client.async_internal_kv_keys(
self.JOB_DATA_KEY_PREFIX.encode(),
namespace=ray_constants.KV_NAMESPACE_JOB,
timeout=timeout,
)
job_ids_with_prefixes = [
job_id.decode() for job_id in raw_job_ids_with_prefixes
]
job_ids = []
for job_id_with_prefix in job_ids_with_prefixes:
assert job_id_with_prefix.startswith(
self.JOB_DATA_KEY_PREFIX
), "Unexpected format for internal_kv key for Job submission"
job_ids.append(job_id_with_prefix[len(self.JOB_DATA_KEY_PREFIX) :])
async def get_job_info(job_id: str):
job_info = await self.get_info(job_id, timeout)
return job_id, job_info
return dict(await asyncio.gather(*[get_job_info(job_id) for job_id in job_ids]))
def uri_to_http_components(package_uri: str) -> Tuple[str, str]:
suffix = Path(package_uri).suffix
if suffix not in {".zip", ".whl"}:
raise ValueError(f"package_uri ({package_uri}) does not end in .zip or .whl")
# We need to strip the <protocol>:// prefix to make it possible to pass
# the package_uri over HTTP.
protocol, package_name = parse_uri(package_uri)
return protocol.value, package_name
def http_uri_components_to_uri(protocol: str, package_name: str) -> str:
return f"{protocol}://{package_name}"
def validate_request_type(json_data: Dict[str, Any], request_type: dataclass) -> Any:
return request_type(**json_data)
@dataclass
| JobInfoStorageClient |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-scrapegraph/tests/test_tools_scrapegraph.py | {
"start": 14518,
"end": 15542
} | class ____:
"""Test tool integration features."""
def test_spec_functions_list(self):
"""Test that all expected functions are in spec_functions."""
expected_functions = [
"scrapegraph_smartscraper",
"scrapegraph_markdownify",
"scrapegraph_search",
"scrapegraph_scrape",
"scrapegraph_agentic_scraper",
]
assert ScrapegraphToolSpec.spec_functions == expected_functions
def test_to_tool_list(self, tool_spec_with_api_key):
"""Test conversion to LlamaIndex tool list."""
tool_spec, _ = tool_spec_with_api_key
tools = tool_spec.to_tool_list()
# Should create one tool for each spec function
assert len(tools) == len(ScrapegraphToolSpec.spec_functions)
# Check tool names match spec functions
tool_names = [tool.metadata.name for tool in tools]
for func_name in ScrapegraphToolSpec.spec_functions:
assert func_name in tool_names
| TestToolIntegration |
python | pytorch__pytorch | test/test_nn.py | {
"start": 354926,
"end": 362828
} | class ____(TestCase):
def test_add_relu(self):
a = torch.rand((7, 11))
b = torch.rand((7, 11))
a = a.float()
b = b.float()
a = a * -10
a = a + 5
add_res = a + b
relu_res = torch.relu(add_res)
add_relu_res = torch._VF._add_relu(a, b)
self.assertEqual(add_relu_res, relu_res)
def test_add_relu_broadcasting(self):
a = torch.rand((1, 32))
b = 1
b_scalar = torch.ones(1, 32)
res = torch._VF._add_relu(a, b)
broadcasted_res = torch._VF._add_relu(a, b_scalar)
self.assertEqual(broadcasted_res, res)
def add_test(test, decorator=None):
def add(test_name, fn):
if hasattr(TestNN, test_name):
raise RuntimeError('Found two tests with the same name: ' + test_name)
if decorator is not None:
fn = decorator(fn)
setattr(TestNN, test_name, fn)
test_name = test.get_name()
if not hasattr(test, 'test_cpu') or test.test_cpu:
add(test_name, lambda self, test=test: test(self))
cuda_test_name = test_name + '_cuda'
# With dtype enable, it's good enough to test against three floating types
kwargs = {}
if 'extra_args' in get_function_arglist(test.test_cuda):
kwargs['extra_args'] = test.extra_args
if 'dtype' in get_function_arglist(test.test_cuda):
if torch.cuda.is_tf32_supported() and test.with_tf32:
def with_tf32_off(self, test=test, kwargs=kwargs):
with tf32_off():
test.test_cuda(self, dtype=torch.float, **kwargs)
add(cuda_test_name + '_fp32', with_tf32_off)
def with_tf32_on(self, test=test, kwargs=kwargs):
with tf32_on(self, test.tf32_precision):
test.test_cuda(self, dtype=torch.float, **kwargs)
add(cuda_test_name + '_tf32', with_tf32_on)
else:
add(cuda_test_name + '_float', lambda self,
test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
add(cuda_test_name + '_double', lambda self,
test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))
def test_half(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.half, **kwargs)
if getattr(test, 'check_half', True):
add(cuda_test_name + '_half', test_half)
def test_bfloat16(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.bfloat16, **kwargs)
if getattr(test, 'check_bfloat16', True):
add(cuda_test_name + '_bfloat16', test_bfloat16)
def test_cfloat(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.cfloat, **kwargs)
def test_cdouble(self, test=test, kwargs=kwargs):
test.test_cuda(self, dtype=torch.cdouble, **kwargs)
if getattr(test, 'check_complex', False):
add(cuda_test_name + '_cfloat', test_cfloat)
add(cuda_test_name + '_cdouble', test_cdouble)
else:
def with_tf32_off(self, test=test, kwargs=kwargs):
with tf32_off():
test.test_cuda(self, **kwargs)
if torch.cuda.is_tf32_supported() and test.with_tf32:
add(cuda_test_name + '_fp32', with_tf32_off)
def with_tf32_on(self, test=test, kwargs=kwargs):
with tf32_on(self, test.tf32_precision):
test.test_cuda(self, **kwargs)
add(cuda_test_name + '_tf32', with_tf32_on)
else:
add(cuda_test_name, with_tf32_off)
for test_params in module_tests + get_new_module_tests():
# TODO: CUDA is not implemented yet
if 'constructor' not in test_params:
name = test_params.pop('module_name')
test_params['constructor'] = getattr(nn, name)
decorator = test_params.pop('decorator', None)
test = NewModuleTest(**test_params)
add_test(test, decorator)
if 'check_eval' in test_params:
# create a new test that is identical but that sets module.training to False
desc = test_params.get('desc', None)
test_params['desc'] = 'eval' if desc is None else desc + '_eval'
def gen_eval_constructor(constructor):
def eval_constructor(*args, **kwargs):
cons = constructor(*args, **kwargs)
cons.training = False
return cons
eval_constructor.__name__ = constructor.__name__
return eval_constructor
test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
test = NewModuleTest(**test_params)
add_test(test, decorator)
if 'check_with_long_tensor' in test_params:
fullname = test_params.get('fullname', None)
if fullname:
test_params['fullname'] = fullname + '_with_long_tensor'
else:
desc = test_params.get('desc', None)
test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'
def double_equivalent_of_long_tensor(size):
return torch.randint(-1000, 1000, size=size).double()
def apply_to_cons(t):
if t.is_floating_point():
if isinstance(t, Parameter):
return Parameter(double_equivalent_of_long_tensor(t.size()))
elif isinstance(t, torch.Tensor):
return double_equivalent_of_long_tensor(t.size())
else:
return t
def gen_long_tensor_constructor(constructor):
def long_tensor_constructor(*args, **kwargs):
cons = constructor(*args, **kwargs)
cons._apply(apply_to_cons)
return cons
long_tensor_constructor.__name__ = constructor.__name__
return long_tensor_constructor
def gen_long_tensor_input(input_size):
def input_func():
return double_equivalent_of_long_tensor(input_size)
return input_func
def reference_fn(i, p, m):
# For bad reasons this would create LongTensors that requires gradients
# Remove requires_grad to avoid this
for p in m.parameters():
p.requires_grad_(False)
m._apply(lambda t: t.long())
input = i.long()
out = m.forward(input)
return out
test_params['constructor'] = gen_long_tensor_constructor(test_params['constructor'])
test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])
test_params['reference_fn'] = reference_fn
test_params['check_forward_only'] = True
# Currently we don't support conv2d/conv3d for LongTensor in CUDA
test_params['test_cuda'] = False
test = NewModuleTest(**test_params)
add_test(test, decorator)
for test_params in criterion_tests:
if 'constructor' not in test_params:
name = test_params.pop('module_name')
test_params['constructor'] = getattr(nn, name)
test = CriterionTest(**test_params)
decorator = test_params.pop('decorator', None)
add_test(test, decorator)
if 'check_sum_reduction' in test_params:
desc = test_params.get('desc', None)
test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'
def gen_sum_reduction_constructor(constructor):
def sum_reduction_constructor(*args, **kwargs):
cons = constructor(*args, reduction='sum', **kwargs)
return cons
sum_reduction_constructor.__name__ = constructor.__name__
return sum_reduction_constructor
test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
test = CriterionTest(**test_params)
add_test(test, decorator)
| TestAddRelu |
python | RaRe-Technologies__gensim | gensim/test/test_doc2vec.py | {
"start": 32720,
"end": 33798
} | class ____:
"""
Concatenation of multiple models for reproducing the Paragraph Vectors paper.
Models must have exactly-matching vocabulary and document IDs. (Models should
be trained separately; this wrapper just returns concatenated results.)
"""
def __init__(self, models):
self.models = models
if hasattr(models[0], 'dv'):
self.dv = ConcatenatedDocvecs([model.dv for model in models])
def __getitem__(self, token):
return np.concatenate([model[token] for model in self.models])
def __str__(self):
"""Abbreviated name, built from submodels' names"""
return "+".join(str(model) for model in self.models)
@property
def epochs(self):
return self.models[0].epochs
def infer_vector(self, document, alpha=None, min_alpha=None, epochs=None):
return np.concatenate([model.infer_vector(document, alpha, min_alpha, epochs) for model in self.models])
def train(self, *ignore_args, **ignore_kwargs):
pass # train subcomponents individually
| ConcatenatedDoc2Vec |
python | huggingface__transformers | src/transformers/models/diffllama/modular_diffllama.py | {
"start": 14279,
"end": 18552
} | class ____(DiffLlamaAttention):
"""
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
SDPA API.
"""
# Adapted from DiffLlamaAttention.forward
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, None
DIFFLLAMA_ATTENTION_CLASSES = {
"eager": DiffLlamaAttention,
"flash_attention_2": DiffLlamaFlashAttention2,
"sdpa": DiffLlamaSdpaAttention,
}
| DiffLlamaSdpaAttention |
python | pytorch__pytorch | torch/testing/_internal/distributed/multi_threaded_pg.py | {
"start": 19617,
"end": 21410
} | class ____:
_world = threading.local()
def _get_world(self) -> WorldData:
if not hasattr(ThreadLocalWorld._world, "world"):
ThreadLocalWorld._world.world = WorldData(
None, {}, {}, {}, {}, 0, {}, {}, {}
)
return ThreadLocalWorld._world.world
@property
def default_pg(self):
return self._get_world().default_pg
@default_pg.setter
def default_pg(self, value):
self._get_world().default_pg = value
@property
def pg_map(self):
return self._get_world().pg_map
@property
def pg_names(self):
return self._get_world().pg_names
@property
def pg_group_ranks(self):
return self._get_world().pg_group_ranks
@property
def pg_backend_config(self):
return self._get_world().pg_backend_config
@property
def group_count(self) -> int:
return self._get_world().group_count
@group_count.setter
def group_count(self, value):
self._get_world().group_count = value
@property
def tags_to_pg(self):
return self._get_world().tags_to_pg
@property
def pg_to_tag(self):
return self._get_world().pg_to_tag
@property
def pg_coalesce_state(self) -> dict[dist.ProcessGroup, list[Union[_CollOp, P2POp]]]:
return self._get_world().pg_coalesce_state
_old_pg_world = None
_ctx_manager = None
def _install_threaded_pg():
global _old_pg_world
global _ctx_manager
_old_pg_world = dist.distributed_c10d._world
dist.distributed_c10d._world = ThreadLocalWorld()
_ctx_manager = torch.autograd.set_multithreading_enabled(False)
return dist.distributed_c10d._world
def _uninstall_threaded_pg():
dist.distributed_c10d._world = _old_pg_world
| ThreadLocalWorld |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py | {
"start": 44306,
"end": 45962
} | class ____(TestQueuedEventEndpoint):
@pytest.mark.usefixtures("time_freezer")
def test_should_respond_200(self, test_client, session, create_dummy_dag):
dag, _ = create_dummy_dag()
dag_id = dag.dag_id
(asset,) = self.create_assets(session=session, num=1)
self._create_asset_dag_run_queues(dag_id, asset.id, session)
with assert_queries_count(4):
response = test_client.get(
f"/dags/{dag_id}/assets/queuedEvents",
)
assert response.status_code == 200
assert response.json() == {
"queued_events": [
{
"asset_id": asset.id,
"dag_id": "dag",
"dag_display_name": "dag",
"created_at": from_datetime_to_zulu_without_ms(DEFAULT_DATE),
}
],
"total_entries": 1,
}
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/dags/random/assets/queuedEvents")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/dags/random/assets/queuedEvents")
assert response.status_code == 403
def test_should_respond_404(self, test_client):
dag_id = "not_exists"
response = test_client.get(
f"/dags/{dag_id}/assets/queuedEvents",
)
assert response.status_code == 404
assert response.json()["detail"] == "Queue event with dag_id: `not_exists` was not found"
| TestGetDagAssetQueuedEvents |
python | miyuchina__mistletoe | mistletoe/block_tokenizer.py | {
"start": 3048,
"end": 3259
} | class ____(list):
"""
A wrapper around builtin list,
so that setattr(list, 'loose') is legal.
"""
def __init__(self, *args):
super().__init__(*args)
self.loose = False
| ParseBuffer |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 8267,
"end": 8636
} | class ____(object):
def normal_method(self):
return 1
if True:
def conditional_method(self):
var = self.normal_method()
#? int()
var
return 2
def other_method(self):
var = self.conditional_method()
#? int()
var
# -----------------
# mro method
# -----------------
| TestX |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/annotated.py | {
"start": 529,
"end": 869
} | class ____:
"""docstring"""
#: Docstring about the ``name`` attribute.
name: Annotated[str, 'attribute']
#: Docstring about the ``max_len`` attribute.
max_len: list[Annotated[str, MaxLen(10, ['word_one', 'word_two'])]]
#: Docstring about the ``validated`` attribute.
validated: ValidatedString
| AnnotatedAttributes |
python | readthedocs__readthedocs.org | readthedocs/oauth/management/commands/sync_vcs_data.py | {
"start": 223,
"end": 4585
} | class ____(BaseCommand):
help = "Sync OAuth RemoteRepository and RemoteOrganization"
def add_arguments(self, parser):
parser.add_argument(
"--queue",
type=str,
default="resync-oauth",
help="Celery queue name.",
)
parser.add_argument(
"--users",
nargs="+",
type=str,
default=[],
help="Re-sync VCS provider data for specific users only.",
)
parser.add_argument(
"--logged-in-days-ago",
type=int,
default=0,
help="Re-sync users logged in in the last days.",
)
parser.add_argument(
"--skip-revoked-users",
action="store_true",
default=False,
help="Skip users who revoked our access token (pulled down from Sentry).",
)
parser.add_argument(
"--skip-users",
nargs="+",
type=str,
default=[],
help="Skip re-sync VCS provider data for specific users.",
)
parser.add_argument(
"--max-users",
type=int,
default=100,
help="Maximum number of users that should be synced.",
)
parser.add_argument(
"--force",
action="store_true",
default=False,
help="Force re-sync VCS provider data even if the users are already synced.",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Do not trigger tasks for VCS provider re-sync.",
)
def handle(self, *args, **options):
queue = options.get("queue")
logged_in_days_ago = options.get("logged_in_days_ago")
skip_revoked_users = options.get("skip_revoked_users")
sync_users = options.get("users")
skip_users = options.get("skip_users")
max_users = options.get("max_users")
force_sync = options.get("force")
dry_run = options.get("dry_run")
# Filter users who have social accounts connected to their RTD account
users = User.objects.filter(socialaccount__isnull=False).distinct()
if logged_in_days_ago > 0:
users = users.filter(
last_login__gte=timezone.now() - datetime.timedelta(days=logged_in_days_ago),
)
if not force_sync:
users = users.filter(remote_repository_relations__isnull=True).distinct()
self.stdout.write(self.style.SUCCESS(f"Total {users.count()} user(s) can be synced"))
if sync_users:
users = users.filter(username__in=sync_users)
if skip_users:
users = users.exclude(username__in=skip_users)
revoked_users = []
if skip_revoked_users:
# `revoked-users.json` was created by a script pullig down data from Sentry
# https://gist.github.com/humitos/aba1a004abeb3552fd8ef9a741f5dce1
# pylint: disable=consider-using-with disable=unspecified-encoding
revoked_users = json.load(open("revoked-users.json", "r"))
users = users.exclude(username__in=revoked_users)
self.stdout.write(self.style.WARNING(f"Excluding {len(revoked_users)} revoked users."))
if sync_users or skip_users or revoked_users:
self.stdout.write(
self.style.SUCCESS(f"Found {users.count()} user(s) with the given parameters")
)
# Don't trigger VCS provider re-sync tasks if --dry-run is provided
if dry_run:
self.stdout.write(
self.style.WARNING(
"No VCS provider re-sync task was triggered. "
"Run it without --dry-run to trigger the re-sync tasks."
)
)
else:
users_to_sync = users.values_list("id", flat=True)[:max_users]
self.stdout.write(
self.style.SUCCESS(
f"Triggering VCS provider re-sync task(s) for {len(users_to_sync)} user(s)"
)
)
for user_id in users_to_sync:
# Trigger Sync Remote Repository Tasks for users
sync_remote_repositories.apply_async(args=[user_id], queue=queue)
| Command |
python | great-expectations__great_expectations | tests/sqlalchemy_test_doubles.py | {
"start": 853,
"end": 1422
} | class ____:
def __init__(self, dialect: Dialect):
self.dialect = dialect
@contextmanager
def begin(self):
yield _MockConnection(self.dialect)
@contextmanager
def connect(self):
"""A contextmanager that yields a _MockConnection"""
yield _MockConnection(self.dialect)
def execute(self, *args, **kwargs):
"""This method is needed because currently we sometimes use a
connection in place of an engine.
When this is cleaned up we should remove this method.
"""
pass
| MockSaEngine |
python | pypa__hatch | tests/backend/metadata/test_hatch.py | {
"start": 1748,
"end": 2711
} | class ____:
def test_empty(self, isolation):
with pytest.raises(
ValueError, match="The `source` option under the `tool.hatch.version` table must not be empty if defined"
):
_ = HatchMetadata(isolation, {"version": {"source": ""}}, None).version.source_name
def test_not_table(self, isolation):
with pytest.raises(TypeError, match="Field `tool.hatch.version.source` must be a string"):
_ = HatchMetadata(isolation, {"version": {"source": 9000}}, None).version.source_name
def test_correct(self, isolation):
metadata = HatchMetadata(isolation, {"version": {"source": "foo"}}, None)
assert metadata.version.source_name == metadata.version.source_name == "foo"
def test_default(self, isolation):
metadata = HatchMetadata(isolation, {"version": {}}, None)
assert metadata.version.source_name == metadata.version.source_name == "regex"
| TestVersionSourceName |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_change_between.py | {
"start": 2661,
"end": 10063
} | class ____(ColumnMapExpectation):
"""Expect the numeric difference between current and previous row is within expected range.
E.g:
input = [1,2,5]
expected difference range = between 1 and 2
result = false because the difference between 2 and 5 is not between 1 and 2
Args:
column (str): The column name
Keyword Args:
from_value (float): low range value
to_value (float): high range value
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"numbers_difference_max_3": [1, 3, 5, 7.5, 10, 12, 15],
"numbers_difference_max_5": [-3, -1, 2, 5, 13, 15, 21],
},
"tests": [
{
"title": "positive_test_with_difference_between_1_and_3",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "numbers_difference_max_3",
"from_value": 1,
"to_value": 3,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "positive_test_with_difference_between_1_and_8",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "numbers_difference_max_5",
"from_value": 1,
"to_value": 8,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_difference_between_1_and_5",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "numbers_difference_max_5",
"from_value": 1,
"to_value": 5,
},
"out": {
"success": False,
"unexpected_index_list": [4, 6],
"unexpected_list": [13, 21],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental"], # Tags for this Expectation in the gallery
"contributors": ["@maikelpenz"], # Don't forget to add your github handle here!
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.change_between"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
# for more information about domain and success keys, and other arguments to Expectations
success_keys = ("from_value", "to_value")
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see
# https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_data_docs/how_to_create_renderers_for_custom_expectations.html
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToChangeBetween().print_diagnostic_checklist()
| ExpectColumnValuesToChangeBetween |
python | openai__openai-python | src/openai/resources/audio/audio.py | {
"start": 2013,
"end": 3177
} | class ____(AsyncAPIResource):
@cached_property
def transcriptions(self) -> AsyncTranscriptions:
return AsyncTranscriptions(self._client)
@cached_property
def translations(self) -> AsyncTranslations:
return AsyncTranslations(self._client)
@cached_property
def speech(self) -> AsyncSpeech:
return AsyncSpeech(self._client)
@cached_property
def with_raw_response(self) -> AsyncAudioWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncAudioWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncAudioWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncAudioWithStreamingResponse(self)
| AsyncAudio |
python | tornadoweb__tornado | tornado/test/concurrent_test.py | {
"start": 1548,
"end": 2555
} | class ____(AsyncTestCase):
@gen_test
async def test_asyncio_futures(self):
fut: Future[int] = Future()
fut2: Future[int] = Future()
chain_future(fut, fut2)
fut.set_result(42)
result = await fut2
self.assertEqual(result, 42)
@gen_test
async def test_concurrent_futures(self):
# A three-step chain: two concurrent futures (showing that both arguments to chain_future
# can be concurrent futures), and then one from a concurrent future to an asyncio future so
# we can use it in await.
fut: futures.Future[int] = futures.Future()
fut2: futures.Future[int] = futures.Future()
fut3: Future[int] = Future()
chain_future(fut, fut2)
chain_future(fut2, fut3)
fut.set_result(42)
result = await fut3
self.assertEqual(result, 42)
# The following series of classes demonstrate and test various styles
# of use, with and without generators and futures.
| ChainFutureTest |
python | getsentry__sentry | src/sentry/analytics/events/inapp_request.py | {
"start": 516,
"end": 660
} | class ____(InviteOrJoinRequest):
pass
analytics.register(InviteRequestSentEvent)
analytics.register(JoinRequestSentEvent)
| JoinRequestSentEvent |
python | ansible__ansible | packaging/release.py | {
"start": 10081,
"end": 11527
} | class ____:
"""Information about a release artifact on PyPI."""
package_type: str
package_label: str
url: str
size: int
digest: str
digest_algorithm: str
# endregion
# region Utilities
SCRIPT_DIR = pathlib.Path(__file__).parent.resolve()
CHECKOUT_DIR = SCRIPT_DIR.parent
ANSIBLE_LIB_DIR = CHECKOUT_DIR / "lib"
ANSIBLE_DIR = ANSIBLE_LIB_DIR / "ansible"
ANSIBLE_BIN_DIR = CHECKOUT_DIR / "bin"
ANSIBLE_RELEASE_FILE = ANSIBLE_DIR / "release.py"
ANSIBLE_REQUIREMENTS_FILE = CHECKOUT_DIR / "requirements.txt"
ANSIBLE_CHANGELOG_REQUIREMENTS_FILE = CHECKOUT_DIR / "test/lib/ansible_test/_data/requirements/sanity.changelog.txt"
ANSIBLE_PYPROJECT_TOML_FILE = CHECKOUT_DIR / "pyproject.toml"
DIST_DIR = CHECKOUT_DIR / "dist"
VENV_DIR = DIST_DIR / ".venv" / "release"
CHANGELOGS_DIR = CHECKOUT_DIR / "changelogs"
CHANGELOGS_FRAGMENTS_DIR = CHANGELOGS_DIR / "fragments"
ANSIBLE_VERSION_PATTERN = re.compile("^__version__ = '(?P<version>.*)'$", re.MULTILINE)
ANSIBLE_VERSION_FORMAT = "__version__ = '{version}'"
DIGEST_ALGORITHM = "sha256"
# These endpoint names match those defined as defaults in twine.
# See: https://github.com/pypa/twine/blob/9c2c0a1c535155931c3d879359330cb836950c6a/twine/utils.py#L82-L85
PYPI_ENDPOINTS = dict(
pypi="https://pypi.org/pypi",
testpypi="https://test.pypi.org/pypi",
)
PIP_ENV = dict(
PIP_REQUIRE_VIRTUALENV="yes",
PIP_DISABLE_PIP_VERSION_CHECK="yes",
)
| ReleaseArtifact |
python | pandas-dev__pandas | pandas/io/formats/info.py | {
"start": 19052,
"end": 20279
} | class ____(_InfoPrinterAbstract):
"""Class for printing series info.
Parameters
----------
info : SeriesInfo
Instance of SeriesInfo.
verbose : bool, optional
Whether to print the full summary.
show_counts : bool, optional
Whether to show the non-null counts.
"""
def __init__(
self,
info: SeriesInfo,
verbose: bool | None = None,
show_counts: bool | None = None,
) -> None:
self.info = info
self.data = info.data
self.verbose = verbose
self.show_counts = self._initialize_show_counts(show_counts)
def _create_table_builder(self) -> _SeriesTableBuilder:
"""
Create instance of table builder based on verbosity.
"""
if self.verbose or self.verbose is None:
return _SeriesTableBuilderVerbose(
info=self.info,
with_counts=self.show_counts,
)
else:
return _SeriesTableBuilderNonVerbose(info=self.info)
def _initialize_show_counts(self, show_counts: bool | None) -> bool:
if show_counts is None:
return True
else:
return show_counts
| _SeriesInfoPrinter |
python | sympy__sympy | sympy/series/sequences.py | {
"start": 28586,
"end": 31972
} | class ____(SeqExprOp):
"""Represents term-wise addition of sequences.
Rules:
* The interval on which sequence is defined is the intersection
of respective intervals of sequences.
* Anything + :class:`EmptySequence` remains unchanged.
* Other rules are defined in ``_add`` methods of sequence classes.
Examples
========
>>> from sympy import EmptySequence, oo, SeqAdd, SeqPer, SeqFormula
>>> from sympy.abc import n
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), EmptySequence)
SeqPer((1, 2), (n, 0, oo))
>>> SeqAdd(SeqPer((1, 2), (n, 0, 5)), SeqPer((1, 2), (n, 6, 10)))
EmptySequence
>>> SeqAdd(SeqPer((1, 2), (n, 0, oo)), SeqFormula(n**2, (n, 0, oo)))
SeqAdd(SeqFormula(n**2, (n, 0, oo)), SeqPer((1, 2), (n, 0, oo)))
>>> SeqAdd(SeqFormula(n**3), SeqFormula(n**2))
SeqFormula(n**3 + n**2, (n, 0, oo))
See Also
========
sympy.series.sequences.SeqMul
"""
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_parameters.evaluate)
# flatten inputs
args = list(args)
# adapted from sympy.sets.sets.Union
def _flatten(arg):
if isinstance(arg, SeqBase):
if isinstance(arg, SeqAdd):
return sum(map(_flatten, arg.args), [])
else:
return [arg]
if iterable(arg):
return sum(map(_flatten, arg), [])
raise TypeError("Input must be Sequences or "
" iterables of Sequences")
args = _flatten(args)
args = [a for a in args if a is not S.EmptySequence]
# Addition of no sequences is EmptySequence
if not args:
return S.EmptySequence
if Intersection(*(a.interval for a in args)) is S.EmptySet:
return S.EmptySequence
# reduce using known rules
if evaluate:
return SeqAdd.reduce(args)
args = list(ordered(args, SeqBase._start_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""Simplify :class:`SeqAdd` using known rules.
Iterates through all pairs and ask the constituent
sequences if they can simplify themselves with any other constituent.
Notes
=====
adapted from ``Union.reduce``
"""
new_args = True
while new_args:
for id1, s in enumerate(args):
new_args = False
for id2, t in enumerate(args):
if id1 == id2:
continue
new_seq = s._add(t)
# This returns None if s does not know how to add
# with t. Returns the newly added sequence otherwise
if new_seq is not None:
new_args = [a for a in args if a not in (s, t)]
new_args.append(new_seq)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return SeqAdd(args, evaluate=False)
def _eval_coeff(self, pt):
"""adds up the coefficients of all the sequences at point pt"""
return sum(a.coeff(pt) for a in self.args)
| SeqAdd |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/_legendgrouptitle.py | {
"start": 233,
"end": 2989
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary"
_path_str = "scatterternary.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatterternary.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 47424,
"end": 47903
} | class ____(unittest.TestCase):
"""Test Ka_GE date_time provider methods"""
def setUp(self):
self.fake = Faker("Ka_GE")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert isinstance(day, str)
assert day in KaGeProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert isinstance(month, str)
assert month in KaGeProvider.MONTH_NAMES.values()
| TestKaGe |
python | redis__redis-py | tests/test_maint_notifications_handling.py | {
"start": 19244,
"end": 21886
} | class ____(TestMaintenanceNotificationsBase):
"""Integration tests for maintenance notifications handling with real connection pool."""
def test_handshake_success_when_enabled(self):
"""Test that handshake is performed correctly."""
maint_notifications_config = MaintNotificationsConfig(
enabled=True, endpoint_type=EndpointType.EXTERNAL_IP
)
test_redis_client = self._get_client(
ConnectionPool, maint_notifications_config=maint_notifications_config
)
try:
# Perform Redis operations that should work with our improved mock responses
result_set = test_redis_client.set("hello", "world")
result_get = test_redis_client.get("hello")
# Verify operations completed successfully
assert result_set is True
assert result_get == b"world"
finally:
test_redis_client.close()
def test_handshake_success_when_auto_and_command_not_supported(self):
"""Test that when maintenance notifications are set to 'auto', the client gracefully handles unsupported MAINT_NOTIFICATIONS commands and normal Redis operations succeed."""
maint_notifications_config = MaintNotificationsConfig(
enabled="auto", endpoint_type=EndpointType.INTERNAL_IP
)
test_redis_client = self._get_client(
ConnectionPool, maint_notifications_config=maint_notifications_config
)
try:
# Perform Redis operations that should work with our improved mock responses
result_set = test_redis_client.set("hello", "world")
result_get = test_redis_client.get("hello")
# Verify operations completed successfully
assert result_set is True
assert result_get == b"world"
finally:
test_redis_client.close()
def test_handshake_failure_when_enabled(self):
"""Test that handshake is performed correctly."""
maint_notifications_config = MaintNotificationsConfig(
enabled=True, endpoint_type=EndpointType.INTERNAL_IP
)
test_redis_client = self._get_client(
ConnectionPool, maint_notifications_config=maint_notifications_config
)
try:
with pytest.raises(ResponseError):
# handshake should fail
# socket mock will return error when enabling maint notifications
# for internal-ip
test_redis_client.set("hello", "world")
finally:
test_redis_client.close()
| TestMaintenanceNotificationsHandshake |
python | getsentry__sentry | src/sentry/migrations/0924_dashboard_add_unique_constraint_for_user_org_position.py | {
"start": 230,
"end": 1982
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0923_dashboard_starred_backfill_orgs"),
]
operations = [
migrations.AlterField(
model_name="dashboardfavoriteuser",
name="organization",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.organization"
),
),
migrations.AddConstraint(
model_name="dashboardfavoriteuser",
constraint=models.UniqueConstraint(
fields=("user_id", "organization_id", "position"),
name="sentry_dashboardfavoriteuser_user_id_organization_id_position_uniq",
),
),
]
| Migration |
python | keras-team__keras | keras/src/ops/math_test.py | {
"start": 39586,
"end": 40354
} | class ____(testing.TestCase):
def test_top_k_call_values(self):
data = np.array([[1, 3, 2], [4, 6, 5]], dtype=np.float32)
k = 2
sorted_flag = True
top_k_op = kmath.TopK(k=k, sorted=sorted_flag)
values, _ = top_k_op.call(data)
expected_values = np.array([[3, 2], [6, 5]], dtype=np.float32)
self.assertAllClose(values, expected_values)
def test_top_k_call_indices(self):
data = np.array([[1, 3, 2], [4, 6, 5]], dtype=np.float32)
k = 2
sorted_flag = True
top_k_op = kmath.TopK(k=k, sorted=sorted_flag)
_, indices = top_k_op.call(data)
expected_indices = np.array([[1, 2], [1, 2]], dtype=np.int32)
self.assertAllClose(indices, expected_indices)
| TopKTest |
python | ray-project__ray | rllib/models/tests/test_catalog.py | {
"start": 2329,
"end": 9738
} | class ____(unittest.TestCase):
def tearDown(self):
ray.shutdown()
def test_default_models(self):
ray.init(object_store_memory=1000 * 1024 * 1024)
# Build test cases
flat_input_case = {
"obs_space": Box(0, 1, shape=(3,), dtype=np.float32),
"action_space": Box(0, 1, shape=(4,)),
"num_outputs": 4,
"expected_model": "FullyConnectedNetwork",
}
img_input_case = {
"obs_space": Box(0, 1, shape=(84, 84, 3), dtype=np.float32),
"action_space": Discrete(5),
"num_outputs": 5,
"expected_model": "VisionNetwork",
}
complex_obs_space = Tuple(
[
Box(0, 1, shape=(3,), dtype=np.float32),
Box(0, 1, shape=(4,), dtype=np.float32),
Discrete(3),
]
)
obs_prep = TupleFlatteningPreprocessor(complex_obs_space)
flat_complex_input_case = {
"obs_space": obs_prep.observation_space,
"action_space": Box(0, 1, shape=(5,)),
"num_outputs": 5,
"expected_model": "FullyConnectedNetwork",
}
nested_complex_input_case = {
"obs_space": Tuple(
[
Box(0, 1, shape=(3,), dtype=np.float32),
Discrete(3),
Tuple(
[
Box(0, 1, shape=(84, 84, 3), dtype=np.float32),
Box(0, 1, shape=(84, 84, 3), dtype=np.float32),
]
),
]
),
"action_space": Box(0, 1, shape=(7,)),
"num_outputs": 7,
"expected_model": "ComplexInputNetwork",
}
# Define which tests to run per framework
test_suite = {
"tf": [
flat_input_case,
img_input_case,
flat_complex_input_case,
nested_complex_input_case,
],
"tf2": [
flat_input_case,
img_input_case,
flat_complex_input_case,
nested_complex_input_case,
],
"torch": [
flat_input_case,
img_input_case,
flat_complex_input_case,
nested_complex_input_case,
],
}
for fw, test_cases in test_suite.items():
for test in test_cases:
model_config = {}
if test["expected_model"] == "ComplexInputNetwork":
model_config["fcnet_hiddens"] = [256, 256]
m = ModelCatalog.get_model_v2(
obs_space=test["obs_space"],
action_space=test["action_space"],
num_outputs=test["num_outputs"],
model_config=model_config,
framework=fw,
)
self.assertTrue(test["expected_model"] in type(m).__name__)
# Do a test forward pass.
batch_size = 16
obs = get_dummy_batch_for_space(
test["obs_space"],
batch_size=batch_size,
fill_value="random",
)
if fw == "torch":
obs = convert_to_torch_tensor(obs)
out, state_outs = m({"obs": obs})
self.assertTrue(out.shape == (batch_size, test["num_outputs"]))
self.assertTrue(state_outs == [])
def test_custom_model(self):
ray.init(object_store_memory=1000 * 1024 * 1024)
ModelCatalog.register_custom_model("foo", CustomModel)
p1 = ModelCatalog.get_model_v2(
obs_space=Box(0, 1, shape=(3,), dtype=np.float32),
action_space=Discrete(5),
num_outputs=5,
model_config={"custom_model": "foo"},
)
self.assertEqual(str(type(p1)), str(CustomModel))
def test_custom_action_distribution(self):
class Model:
pass
ray.init(
object_store_memory=1000 * 1024 * 1024, ignore_reinit_error=True
) # otherwise fails sometimes locally
# registration
ModelCatalog.register_custom_action_dist("test", CustomActionDistribution)
action_space = Box(0, 1, shape=(5, 3), dtype=np.float32)
# test retrieving it
model_config = MODEL_DEFAULTS.copy()
model_config["custom_action_dist"] = "test"
dist_cls, param_shape = ModelCatalog.get_action_dist(action_space, model_config)
self.assertEqual(str(dist_cls), str(CustomActionDistribution))
self.assertEqual(param_shape, action_space.shape)
# test the class works as a distribution
dist_input = tf1.placeholder(tf.float32, (None,) + param_shape)
model = Model()
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
self.assertIsInstance(dist.sample(), tf.Tensor)
with self.assertRaises(NotImplementedError):
dist.entropy()
# test passing the options to it
model_config["custom_model_config"].update({"output_dim": (3,)})
dist_cls, param_shape = ModelCatalog.get_action_dist(action_space, model_config)
self.assertEqual(param_shape, (3,))
dist_input = tf1.placeholder(tf.float32, (None,) + param_shape)
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
self.assertIsInstance(dist.sample(), tf.Tensor)
with self.assertRaises(NotImplementedError):
dist.entropy()
def test_custom_multi_action_distribution(self):
class Model:
pass
ray.init(
object_store_memory=1000 * 1024 * 1024, ignore_reinit_error=True
) # otherwise fails sometimes locally
# registration
ModelCatalog.register_custom_action_dist("test", CustomMultiActionDistribution)
s1 = Discrete(5)
s2 = Box(0, 1, shape=(3,), dtype=np.float32)
spaces = dict(action_1=s1, action_2=s2)
action_space = Dict(spaces)
# test retrieving it
model_config = MODEL_DEFAULTS.copy()
model_config["custom_action_dist"] = "test"
dist_cls, param_shape = ModelCatalog.get_action_dist(action_space, model_config)
self.assertIsInstance(dist_cls, partial)
self.assertEqual(param_shape, s1.n + 2 * s2.shape[0])
# test the class works as a distribution
dist_input = tf1.placeholder(tf.float32, (None, param_shape))
model = Model()
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertIsInstance(dist.sample(), dict)
self.assertIn("action_1", dist.sample())
self.assertIn("action_2", dist.sample())
self.assertEqual(dist.sample()["action_1"].dtype, tf.int64)
self.assertEqual(dist.sample()["action_2"].shape[1:], s2.shape)
with self.assertRaises(NotImplementedError):
dist.entropy()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestModelCatalog |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modeling_granitemoehybrid.py | {
"start": 51206,
"end": 52114
} | class ____(TypedDict, total=False):
"""
Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
Use cases include padding-free training and fewer `torch.compile` graph breaks.
Attributes:
cu_seq_lens_q (`torch.LongTensor`)
Gets cumulative sequence length for query state.
cu_seq_lens_k (`torch.LongTensor`)
Gets cumulative sequence length for key state.
max_length_q (`int`):
Maximum sequence length for query state.
max_length_k (`int`):
Maximum sequence length for key state.
seq_idx (`torch.IntTensor):
Index of each packed sequence.
"""
cu_seq_lens_q: torch.LongTensor
cu_seq_lens_k: torch.LongTensor
max_length_q: int
max_length_k: int
seq_idx: torch.IntTensor
@use_kernel_forward_from_hub("RMSNorm")
| GraniteFlashAttentionKwargs |
python | pytorch__pytorch | tools/jit/gen_unboxing.py | {
"start": 3587,
"end": 10739
} | class ____:
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
if not self.selector.is_root_operator(f"aten::{f.func.name}"):
return ""
# We unconditionally generate function wrappers,
sig_group = CppSignatureGroup.from_native_function(f, method=False)
sig = sig_group.most_faithful_signature()
# escape double quote in schema, get rid of extra double quotes
schema = cpp_string(str(sig.func))[1:-1]
# arguments
args = sig.arguments()
connector = ",\n\t\t"
args_code = []
for arg in args:
# Using method=False faithful C++ API, so we should not see SelfArgument/TensorOptionsArgument
assert isinstance(arg.argument, Argument)
if not arg.argument.default:
arg_cpp = "c10::IValue(::std::nullopt)"
else:
# The unboxing code uses the faithful C++ API to avoid the overhead
# from wrapping/unwrapping TensorOptios.
# However, we would look to include default args for schema parsing.
# Default args only show up in the nonfaithful C++ API,
arg_default = cpp.default_expr(
arg.argument.default, arg.argument.type, symint=False
)
if arg_default.startswith("{"):
arg_cpp = f"c10::IntArrayRef({arg_default})"
else:
arg_cpp = f"c10::IValue({arg_default})"
args_code.append(
# pyrefly: ignore [bad-argument-type]
f"""c10::Argument("{arg.name}", nullptr, ::std::nullopt, {arg_cpp})"""
)
returns = f.func.returns
returns_code = []
for ret in returns:
# pyrefly: ignore [bad-argument-type]
returns_code.append(f"""c10::Argument("{ret.name if ret.name else ""}")""")
return f"""
// aten::{schema}
OperatorGenerator(
"aten::{f.func.name.name}",
"{f.func.name.overload_name}",
{{
{connector.join(args_code)}
}},
{{
{connector.join(returns_code)}
}},
[](Stack & stack) {{
RECORD_FUNCTION("{sig.name()}", std::vector<c10::IValue>());
at::unboxing::{unboxing.name(f)}(stack);
}},
aliasAnalysisFromSchema()
),
"""
def gen_unboxing(
*,
native_functions: Sequence[NativeFunction],
cpu_fm: FileManager,
selector: SelectiveBuilder,
) -> None:
def key_func(fn: NativeFunction | NativeFunctionsGroup) -> str:
return fn.root_name
selected_op_num: int = len(selector.operators)
# a best practice threshold of operators to enable sharding
sharding_threshold: int = 100
cpu_fm.write_sharded(
"UnboxingFunctions.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"definitions": [ComputeUnboxingFunctions(Target.DEFINITION, selector)(fn)]
},
num_shards=1 if selected_op_num < sharding_threshold else 5,
sharded_keys={"definitions"},
)
cpu_fm.write(
"UnboxingFunctions.h",
lambda: {
"declarations": list(
mapMaybe(
ComputeUnboxingFunctions(Target.DECLARATION, selector),
native_functions,
)
),
},
)
cpu_fm.write_sharded(
"RegisterCodegenUnboxedKernels.cpp",
native_functions,
key_fn=key_func,
env_callable=lambda fn: {
"unboxed_ops": [ComputeCodegenUnboxedKernels(selector)(fn)]
},
num_shards=1 if selected_op_num < sharding_threshold else 10,
sharded_keys={"unboxed_ops"},
)
def main(args: list[str]) -> None:
parser = argparse.ArgumentParser(description="Generate unboxing source files")
parser.add_argument(
"-s",
"--source-path",
help="path to source directory for ATen",
default="aten/src/ATen",
)
parser.add_argument(
"-d",
"--install-dir",
"--install_dir",
help="output directory",
default="build/aten/src/ATen",
)
parser.add_argument(
"-o",
"--output-dependencies",
help="output a list of dependencies into the given file and exit",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="run without writing any files (still updates outputs)",
)
parser.add_argument(
"--op-selection-yaml-path",
"--op_selection_yaml_path",
help="Provide a path to the operator selection (for custom build) YAML "
"that contains the information about the set of selected operators "
"and their categories (training, ...). Each operator is either a "
"full operator name with overload or just a bare operator name. "
"The operator names also contain the namespace prefix (e.g. aten::)",
)
parser.add_argument(
"--op-registration-allowlist",
"--op_registration_allowlist",
nargs="*",
help="filter op registrations by the allowlist (if set); "
"each item is `namespace`::`operator name` without overload name; "
"e.g.: aten::empty aten::conv2d ...",
)
parser.add_argument(
"--TEST-ONLY-op-registration-allowlist-yaml-path",
"--TEST_ONLY_op_registration_allowlist_yaml_path",
help="Provide a path to the operator selection (for custom build) YAML "
"which contains a list of operators. It is to serve testing purpose and "
"each item is `namespace`::`operator name` without overload name; "
"e.g.: aten::empty aten::conv2d ...",
)
options = parser.parse_args(args)
if options.op_registration_allowlist:
op_registration_allowlist = options.op_registration_allowlist
elif options.TEST_ONLY_op_registration_allowlist_yaml_path:
with open(options.TEST_ONLY_op_registration_allowlist_yaml_path) as f:
op_registration_allowlist = yaml.safe_load(f)
else:
op_registration_allowlist = None
selector = get_custom_build_selector(
op_registration_allowlist,
options.op_selection_yaml_path,
)
native_yaml_path = os.path.join(options.source_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(options.source_path, "native/tags.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, _backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
cpu_fm = make_file_manager(options=options)
gen_unboxing(native_functions=native_functions, cpu_fm=cpu_fm, selector=selector)
if options.output_dependencies:
depfile_path = Path(options.output_dependencies).resolve()
depfile_name = depfile_path.name
depfile_stem = depfile_path.stem
path = depfile_path.parent / depfile_name
cpu_fm.write_outputs(depfile_stem, str(path))
if __name__ == "__main__":
main(sys.argv[1:])
| ComputeCodegenUnboxedKernels |
python | django-extensions__django-extensions | tests/test_admin_filter.py | {
"start": 281,
"end": 763
} | class ____(TestCase):
"""Base class for filter test cases."""
@classmethod
def setUpClass(cls):
SecretFactory.create_batch(5, text=Iterator([None, None, "foo", "bar", None]))
cls.request = RequestFactory().get("/admin/testapp/secret")
cls.field = Secret._meta.get_field("text")
cls.field_path = "text"
cls.qs = Secret.objects.all()
@classmethod
def tearDownClass(cls):
Secret.objects.all().delete()
| BaseFieldFilter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataplex.py | {
"start": 3007,
"end": 3279
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Catalog EntryGroups link."""
name = "Dataplex Catalog EntryGroups"
key = "dataplex_catalog_entry_groups_key"
format_str = DATAPLEX_CATALOG_ENTRY_GROUPS_LINK
| DataplexCatalogEntryGroupsLink |
python | rushter__MLAlgorithms | mla/tsne.py | {
"start": 273,
"end": 4023
} | class ____(BaseEstimator):
y_required = False
def __init__(
self, n_components=2, perplexity=30.0, max_iter=200, learning_rate=500
):
"""A t-Distributed Stochastic Neighbor Embedding implementation.
Parameters
----------
max_iter : int, default 200
perplexity : float, default 30.0
n_components : int, default 2
"""
self.max_iter = max_iter
self.perplexity = perplexity
self.n_components = n_components
self.initial_momentum = 0.5
self.final_momentum = 0.8
self.min_gain = 0.01
self.lr = learning_rate
self.tol = 1e-5
self.perplexity_tries = 50
def fit_transform(self, X, y=None):
self._setup_input(X, y)
Y = np.random.randn(self.n_samples, self.n_components)
velocity = np.zeros_like(Y)
gains = np.ones_like(Y)
P = self._get_pairwise_affinities(X)
iter_num = 0
while iter_num < self.max_iter:
iter_num += 1
D = l2_distance(Y)
Q = self._q_distribution(D)
# Normalizer q distribution
Q_n = Q / np.sum(Q)
# Early exaggeration & momentum
pmul = 4.0 if iter_num < 100 else 1.0
momentum = 0.5 if iter_num < 20 else 0.8
# Perform gradient step
grads = np.zeros(Y.shape)
for i in range(self.n_samples):
grad = 4 * np.dot((pmul * P[i] - Q_n[i]) * Q[i], Y[i] - Y)
grads[i] = grad
gains = (gains + 0.2) * ((grads > 0) != (velocity > 0)) + (gains * 0.8) * (
(grads > 0) == (velocity > 0)
)
gains = gains.clip(min=self.min_gain)
velocity = momentum * velocity - self.lr * (gains * grads)
Y += velocity
Y = Y - np.mean(Y, 0)
error = np.sum(P * np.log(P / Q_n))
logging.info("Iteration %s, error %s" % (iter_num, error))
return Y
def _get_pairwise_affinities(self, X):
"""Computes pairwise affinities."""
affines = np.zeros((self.n_samples, self.n_samples), dtype=np.float32)
target_entropy = np.log(self.perplexity)
distances = l2_distance(X)
for i in range(self.n_samples):
affines[i, :] = self._binary_search(distances[i], target_entropy)
# Fill diagonal with near zero value
np.fill_diagonal(affines, 1.0e-12)
affines = affines.clip(min=1e-100)
affines = (affines + affines.T) / (2 * self.n_samples)
return affines
def _binary_search(self, dist, target_entropy):
"""Performs binary search to find suitable precision."""
precision_min = 0
precision_max = 1.0e15
precision = 1.0e5
for _ in range(self.perplexity_tries):
denom = np.sum(np.exp(-dist[dist > 0.0] / precision))
beta = np.exp(-dist / precision) / denom
# Exclude zeros
g_beta = beta[beta > 0.0]
entropy = -np.sum(g_beta * np.log2(g_beta))
error = entropy - target_entropy
if error > 0:
# Decrease precision
precision_max = precision
precision = (precision + precision_min) / 2.0
else:
# Increase precision
precision_min = precision
precision = (precision + precision_max) / 2.0
if np.abs(error) < self.tol:
break
return beta
def _q_distribution(self, D):
"""Computes Student t-distribution."""
Q = 1.0 / (1.0 + D)
np.fill_diagonal(Q, 0.0)
Q = Q.clip(min=1e-100)
return Q
| TSNE |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams5.py | {
"start": 338,
"end": 380
} | class ____[R: "ClassE[Any]"]:
...
| ClassD |
python | django-compressor__django-compressor | compressor/exceptions.py | {
"start": 448,
"end": 571
} | class ____(Exception):
"""
Offline compression generation related exceptions
"""
pass
| OfflineGenerationError |
python | django__django | django/contrib/sessions/backends/cached_db.py | {
"start": 308,
"end": 4148
} | class ____(DBStore):
"""
Implement cached, database backed sessions.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super().__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
async def acache_key(self):
return self.cache_key_prefix + await self._aget_or_create_session_key()
def load(self):
try:
data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
s = self._get_session_from_db()
if s:
data = self.decode(s.session_data)
self._cache.set(
self.cache_key, data, self.get_expiry_age(expiry=s.expire_date)
)
else:
data = {}
return data
async def aload(self):
try:
data = await self._cache.aget(await self.acache_key())
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
s = await self._aget_session_from_db()
if s:
data = self.decode(s.session_data)
await self._cache.aset(
await self.acache_key(),
data,
await self.aget_expiry_age(expiry=s.expire_date),
)
else:
data = {}
return data
def exists(self, session_key):
return (
session_key
and (self.cache_key_prefix + session_key) in self._cache
or super().exists(session_key)
)
async def aexists(self, session_key):
return (
session_key
and (self.cache_key_prefix + session_key) in self._cache
or await super().aexists(session_key)
)
def save(self, must_create=False):
super().save(must_create)
try:
self._cache.set(self.cache_key, self._session, self.get_expiry_age())
except Exception:
logger.exception("Error saving to cache (%s)", self._cache)
async def asave(self, must_create=False):
await super().asave(must_create)
try:
await self._cache.aset(
await self.acache_key(),
self._session,
await self.aget_expiry_age(),
)
except Exception:
logger.exception("Error saving to cache (%s)", self._cache)
def delete(self, session_key=None):
super().delete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
async def adelete(self, session_key=None):
await super().adelete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
await self._cache.adelete(self.cache_key_prefix + session_key)
def flush(self):
"""
Remove the current session data from the database and regenerate the
key.
"""
self.clear()
self.delete(self.session_key)
self._session_key = None
async def aflush(self):
"""See flush()."""
self.clear()
await self.adelete(self.session_key)
self._session_key = None
| SessionStore |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 32599,
"end": 32808
} | class ____(serializers.ModelSerializer):
user = serializers.CharField(source='username')
class Meta:
model = SensitiveOrderingFilterModel
fields = ('id', 'user')
| SensitiveDataSerializer3 |
python | nedbat__coveragepy | setup.py | {
"start": 4625,
"end": 4858
} | class ____(Exception):
"""Raise this to indicate the C extension wouldn't build."""
def __init__(self):
Exception.__init__(self)
self.cause = sys.exc_info()[1] # work around py 2/3 different syntax
| BuildFailed |
python | jmcnamara__XlsxWriter | xlsxwriter/chart_pie.py | {
"start": 308,
"end": 7533
} | class ____(chart.Chart):
"""
A class for writing the Excel XLSX Pie charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.vary_data_color = 1
self.rotation = 0
# Set the available data label positions for this chart type.
self.label_position_default = "best_fit"
self.label_positions = {
"center": "ctr",
"inside_end": "inEnd",
"outside_end": "outEnd",
"best_fit": "bestFit",
}
def set_rotation(self, rotation: int) -> None:
"""
Set the Pie/Doughnut chart rotation: the angle of the first slice.
Args:
rotation: First segment angle: 0 <= rotation <= 360.
Returns:
Nothing.
"""
if rotation is None:
return
# Ensure the rotation is in Excel's range.
if rotation < 0 or rotation > 360:
warn(
f"Chart rotation '{rotation}' outside Excel range: 0 <= rotation <= 360"
)
return
self.rotation = int(rotation)
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args) -> None:
# Override the virtual superclass method with a chart specific method.
# Write the c:pieChart element.
self._write_pie_chart()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_pie_chart(self) -> None:
# Write the <c:pieChart> element. Over-ridden method to remove
# axis_id code since Pie charts don't require val and cat axes.
self._xml_start_tag("c:pieChart")
# Write the c:varyColors element.
self._write_vary_colors()
# Write the series elements.
for data in self.series:
self._write_ser(data)
# Write the c:firstSliceAng element.
self._write_first_slice_ang()
self._xml_end_tag("c:pieChart")
def _write_plot_area(self) -> None:
# Over-ridden method to remove the cat_axis() and val_axis() code
# since Pie charts don't require those axes.
#
# Write the <c:plotArea> element.
self._xml_start_tag("c:plotArea")
# Write the c:layout element.
self._write_layout(self.plotarea.get("layout"), "plot")
# Write the subclass chart type element.
self._write_chart_type(None)
# Configure a combined chart if present.
second_chart = self.combined
if second_chart:
# Secondary axis has unique id otherwise use same as primary.
if second_chart.is_secondary:
second_chart.id = 1000 + self.id
else:
second_chart.id = self.id
# Share the same filehandle for writing.
second_chart.fh = self.fh
# Share series index with primary chart.
second_chart.series_index = self.series_index
# Write the subclass chart type elements for combined chart.
# pylint: disable-next=protected-access
second_chart._write_chart_type(None)
# Write the c:spPr element for the plotarea formatting.
self._write_sp_pr(self.plotarea)
self._xml_end_tag("c:plotArea")
def _write_legend(self) -> None:
# Over-ridden method to add <c:txPr> to legend.
# Write the <c:legend> element.
legend = self.legend
position = legend.get("position", "right")
font = legend.get("font")
delete_series = []
overlay = 0
if legend.get("delete_series") and isinstance(legend["delete_series"], list):
delete_series = legend["delete_series"]
if position.startswith("overlay_"):
position = position.replace("overlay_", "")
overlay = 1
allowed = {
"right": "r",
"left": "l",
"top": "t",
"bottom": "b",
"top_right": "tr",
}
if position == "none":
return
if position not in allowed:
return
position = allowed[position]
self._xml_start_tag("c:legend")
# Write the c:legendPos element.
self._write_legend_pos(position)
# Remove series labels from the legend.
for index in delete_series:
# Write the c:legendEntry element.
self._write_legend_entry(index)
# Write the c:layout element.
self._write_layout(legend.get("layout"), "legend")
# Write the c:overlay element.
if overlay:
self._write_overlay()
# Write the c:spPr element.
self._write_sp_pr(legend)
# Write the c:txPr element. Over-ridden.
self._write_tx_pr_legend(None, font)
self._xml_end_tag("c:legend")
def _write_tx_pr_legend(self, horiz, font) -> None:
# Write the <c:txPr> element for legends.
if font and font.get("rotation"):
rotation = font["rotation"]
else:
rotation = None
self._xml_start_tag("c:txPr")
# Write the a:bodyPr element.
self._write_a_body_pr(rotation, horiz)
# Write the a:lstStyle element.
self._write_a_lst_style()
# Write the a:p element.
self._write_a_p_legend(font)
self._xml_end_tag("c:txPr")
def _write_a_p_legend(self, font) -> None:
# Write the <a:p> element for legends.
self._xml_start_tag("a:p")
# Write the a:pPr element.
self._write_a_p_pr_legend(font)
# Write the a:endParaRPr element.
self._write_a_end_para_rpr()
self._xml_end_tag("a:p")
def _write_a_p_pr_legend(self, font) -> None:
# Write the <a:pPr> element for legends.
attributes = [("rtl", 0)]
self._xml_start_tag("a:pPr", attributes)
# Write the a:defRPr element.
self._write_a_def_rpr(font)
self._xml_end_tag("a:pPr")
def _write_vary_colors(self) -> None:
# Write the <c:varyColors> element.
attributes = [("val", 1)]
self._xml_empty_tag("c:varyColors", attributes)
def _write_first_slice_ang(self) -> None:
# Write the <c:firstSliceAng> element.
attributes = [("val", self.rotation)]
self._xml_empty_tag("c:firstSliceAng", attributes)
def _write_show_leader_lines(self) -> None:
# Write the <c:showLeaderLines> element.
#
# This is for Pie/Doughnut charts. Other chart types only supported
# leader lines after Excel 2015 via an extension element.
attributes = [("val", 1)]
self._xml_empty_tag("c:showLeaderLines", attributes)
| ChartPie |
python | getsentry__sentry | src/sentry/monitors/endpoints/organization_monitor_index_count.py | {
"start": 659,
"end": 2685
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.CRONS
permission_classes = (OrganizationAlertRulePermission,)
def get(self, request: AuthenticatedHttpRequest, organization: Organization) -> Response:
"""
Retrieves the count of cron monitors for an organization.
"""
try:
filter_params = self.get_filter_params(request, organization, date_filter_optional=True)
except NoProjects:
return self.respond(
{
"counts": {
"total": 0,
"active": 0,
"disabled": 0,
},
}
)
queryset = Monitor.objects.filter(
organization_id=organization.id, project_id__in=filter_params["project_id"]
).exclude(
status__in=[
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]
)
environments = filter_params.get("environment_objects")
if environments is not None:
environment_ids = [e.id for e in environments]
# use a distinct() filter as queries spanning multiple tables can include duplicates
queryset = queryset.filter(
Q(monitorenvironment__environment_id__in=environment_ids)
| Q(monitorenvironment=None)
).distinct()
all_monitors_count = queryset.count()
disabled_monitors_count = queryset.filter(status=ObjectStatus.DISABLED).count()
active_monitors_count = all_monitors_count - disabled_monitors_count
return self.respond(
{
"counts": {
"total": all_monitors_count,
"active": active_monitors_count,
"disabled": disabled_monitors_count,
},
}
)
| OrganizationMonitorIndexCountEndpoint |
python | pytorch__pytorch | torch/distributed/tensor/_utils.py | {
"start": 645,
"end": 19013
} | class ____:
"""
Within this context manager, DTensor will refuse to perform implicit redistribution,
instead raising an error. Manual calls to ``redistribute()`` are required wherever a redistribution
must occur to avoid erroring. This can be used to ensure that the user is aware of all redistribution.
Note: it is easier to use this mode on just the forward pass of a typical DTensor program, as the backwards pass
may contain implicit redistribution calls that are not visible to the user and difficult to replace with manual
calls. Redistribution during backward can be made explicit by writing `autograd.Function`s that are no-op
during forward and perform a manual redistribution during backwards.
"""
_local = threading.local()
def __init__(self, enable: bool = True, strict: bool = False):
self._enable = enable
self._strict = strict
@classmethod
def is_redistribute_allowed(cls, src_spec: DTensorSpec, dst_spec: DTensorSpec):
if instance := getattr(cls._local, "_active", None):
if instance._enable:
if instance._strict:
return False
return redistribute_cost(src_spec, dst_spec) <= 0
return True
def __enter__(self):
self._prev = getattr(ExplicitRedistributionContext._local, "_active", None)
ExplicitRedistributionContext._local._active = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
ExplicitRedistributionContext._local._active = self._prev
def _explicit_order_placements(
mesh_shape: ShapeType, placements: Sequence[Placement]
) -> Sequence[tuple[int, Placement]]:
"""
Replace Strided Shards with regular shards in an adjusted order.
Returns a list of (mesh_dim, placement) tuples where the list order is the sharding order.
ex.
[Shard(0), _StridedShard(0, split_factor=2), Shard(0)] ->
[(0, Shard(0)), (2, Shard(0)), (1, Shard(0))]
"""
if not len(placements) == len(mesh_shape):
raise RuntimeError(
"Expected one placement per mesh dim, "
f"but found {len(placements)} placements and {len(mesh_shape)} mesh dims."
)
ordered = []
deferred_strided_placements = defaultdict(list)
strided_part_ended_for_dim = set()
for mesh_dim, p in enumerate(placements):
if isinstance(p, _StridedShard):
# validate the stride is the correct multiple of the meshdim and the earlier shard
deferred_strided_placements[p.dim].append((mesh_dim, p))
else:
ordered.append((mesh_dim, p))
if isinstance(p, Shard):
if p.dim in strided_part_ended_for_dim:
raise NotImplementedError(
f"Strided sharding does not allow Shard() to appear after "
f"the strided part has ended. {p} at mesh dim {mesh_dim} in "
f"{placements} violates this assumption."
)
if p.dim in deferred_strided_placements:
strided_part_ended_for_dim.add(p.dim)
strided_placements = deferred_strided_placements.pop(p.dim)
aggregate_size = mesh_shape[mesh_dim]
while len(strided_placements) > 0:
strided_mesh_dim, strided = strided_placements.pop()
if not strided.split_factor == aggregate_size:
raise RuntimeError(
f"Can only convert _StridedShard to ordered Shard if split_factor({strided.split_factor})"
f" == aggregate mesh size ({aggregate_size})"
)
aggregate_size *= mesh_shape[strided_mesh_dim]
ordered.append((strided_mesh_dim, Shard(p.dim)))
return ordered
def compute_local_shape_and_global_offset(
global_shape: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]
) -> tuple[tuple[int, ...], tuple[int, ...]]:
"""
Compute the local tensor shape and the global offsets into the original tensor
of a DTensor on its current global rank. This is useful for checkpointing purpose.
Example:
global_tensor = [[0, 1, 2, 3, 4], sharded on mesh (DP=2, TP=2) with (Shard(1), Shard(1))
[10, 11, 12, 13, 14]]
This table shows the return value of local_shape and global_offset for each rank.
(`local_tensor` is for illustration only).
Note how the first coordinate of global_offset is always 0, corresponding to tensor dim 0 being replicated.
Rank local_tensor local_shape global_offset
-------------------------------------------------------------
0 [[0, 1], (2, 2) (0, 0)
[10, 11]]
1 [[2], (2, 1) (0, 2)
[12]]
2 [[3], (2, 1) (0, 3)
[13]]
3 [[4], (2, 1) (0, 4)
[14]]
Args:
global_shape (ShapeType): The global shape of the DTensor.
mesh (:class:`DeviceMesh`): The device mesh this DTensor is distributed on.
placements (Sequence[:class:`Placement`]]): The placements of the DTensor.
Return:
local_shape: the shape of the DTensor's _local_tensor on the current rank.
global_offset: a tuple of offsets for each dimension of the global tensor shape,
identifying how this shard fits into the global tensor in each dimension.
"""
return _compute_local_shape_and_global_offset(
global_shape, mesh.shape, mesh.get_coordinate(), placements
)
# accept 'plain data types' to enable simpler unit testing without creating device mesh
def _compute_local_shape_and_global_offset(
global_shape: ShapeType,
mesh_shape: ShapeType,
my_coordinate: list[int] | None,
placements: Sequence[Placement],
) -> tuple[tuple[int, ...], tuple[int, ...]]:
"""
Suppose you have a full tensor with size global_shape, and you have sharded
it according to placements for mesh_shape. This function returns, for a
specific coordinate my_coordinate in the device mesh:
- The size of your local shard WITHOUT padding (i.e., if you have
an uneven split, your size might be smaller than the other entries
in your dim), and
- Where the data for your shard begins, in the full tensor.
This function is fairly simple if your tensor is evenly sharded; the complication
is around uneven splits. There is also some complication for handling StridedShard,
which changes the order you should apply sharding.
"""
if my_coordinate is None:
# if rank not in the mesh, return empty offset
return ((0,), ())
# StridedShard implies a non-standard order to apply shards; get the
# correct order to start applying splits
ordered_placements = _explicit_order_placements(mesh_shape, placements)
local_shape = list(global_shape)
# We'll compute the data for where the shard begins on a per-dim basis.
# However, a single dim can be sharded multiple times, so we will end up
# doing a Sum(size*stride) like computation to determine the location of our
# shard for each of the shardings on that dim.
global_offset = [0] * len(global_shape)
for mesh_dim, placement in ordered_placements:
mesh_dim_size = mesh_shape[mesh_dim]
if isinstance(placement, Shard):
shard_dim = placement.dim
assert shard_dim < len(local_shape), (
f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)}"
)
shard_size, shard_offset = placement._local_shard_size_and_offset(
local_shape[shard_dim],
mesh_dim_size,
my_coordinate[mesh_dim],
)
local_shape[shard_dim] = shard_size
shard_global_offset = global_offset[shard_dim] + not_none(shard_offset)
zero_global_offset = global_shape[shard_dim]
if isinstance(shard_global_offset, torch.SymInt) and not isinstance(
zero_global_offset, torch.SymInt
):
zero_global_offset = torch.SymInt(zero_global_offset)
global_offset[shard_dim] = torch.sym_ite(
shard_size == 0,
# Special case to fill in a standardized non-garbage value for
# the global_offset of zero-sized shards. This value is out
# of bounds of the tensor, so it won't conflict with any real
# offsets. DCP may rely on this value to de-duplicate shards.
# Note that you can end up with zero-size shards that are
# still otherwise in bounds for the tensor (TODO: give an
# example).
zero_global_offset,
# As we successively shard the same dimension, we keep
# advancing our pointer beyond our original offset until we
# get to the final chunk start.
shard_global_offset,
)
# NOTE: the offset compute relies on the local shard index and it has no
# problem when strided sharding is not present. To correctly compute, we assume
# that the ``_StridedShard.split_factor`` field encodes how many partitions
# each local tensor will be further split into when sharding on higher mesh
# dimensions. However, this number is only correct if the DTensor is not
# sharded after the strided sharding completes. For example,
# [Shard(0), _StridedShard(0, split_factor=2), Shard(0)] is the placements
# where the DTensor's dim-0 is first sharded on device mesh dim-0, then on
# device mesh dim-2, and last on mesh dim-1. We define the
# "_StridedShard(0, split_factor=2), Shard(0)" part as the strided sharding
# part because strided sharding happens on mesh dim-1 and it was caused by
# the fact that sharding on dim-2 occurred ahead. In this case, there's no
# further sharding after this strided sharding part and ``split_factor``
# correctly encodes the number. Another example is
# [_StridedShard(0, split_factor=2), Shard(0), Shard(0)] where the DTensor's
# dim-0 is first sharded on mesh dim-1, then on mesh dim-0, and last on mesh
# dim-2. This violates our assumption that no further sharding shall occur
# after the strided sharding part and ``split_factor`` won't correctly
# encode the number of further split. So far, the only case where _StridedShard
# placement would appear is FSDP2 + TP on 2D mesh and the above case could only
# happen on mesh of 3 or more dimensions.
# TODO: change this function to correctly address this.
# TODO: this logic can be applied to contiguous sharding as well
return tuple(local_shape), tuple(global_offset)
compute_global_tensor_info = torch._C._DTensor_compute_global_tensor_info
def compute_local_tensor_info(
global_tensor: torch.Tensor,
mesh: DeviceMesh,
placements: Sequence[Placement],
) -> tuple[list[int], list[int]]:
"""
Compute the local size and stride of a DTensor from the given global tensor info.
For example, if we have a global tensor with size (4, 8, 4) and stride (32, 1, 8).
If the DTensor placements are [Shard(2)] and world_size is 2;
then the local size is (4, 8, 2) and stride is (16, 1, 8).
Args:
tensor (:class:`torch.Tensor`):
Global tensor which DTensor will distribute
mesh (:class:`DeviceMesh`):
Object which describes the mesh topology
of devices for the DTensor.
placements (Sequence[:class:`Placement`]):
The attribute of the DTensor that describes its layout
on the mesh topology.
Returns:
local_shape: A List of int which specifies the size of the local tensor.
local_stride: A List of int which specifies the stride of the local tensor.
"""
local_shape = list(global_tensor.size())
local_stride = list(global_tensor.stride())
for idx, placement in enumerate(placements):
mesh_dim_size = mesh.size(idx)
if placement.is_shard():
shard_placement = cast(Shard, placement)
if shard_placement.dim < 0:
raise AssertionError(
"Shard placements should have negative dims normalized in "
f"the user-facing APIs: {shard_placement}"
)
shard_dim = shard_placement.dim
assert shard_dim < len(local_shape), (
f"Sharding dim {shard_dim} greater than tensor ndim {len(local_shape)} "
f"for placement number {idx}."
)
global_dim_size = local_shape[shard_dim]
assert global_dim_size % mesh_dim_size == 0, (
f"Global dim {global_dim_size} not divisible by mesh size {mesh_dim_size}"
)
local_shape[shard_dim] = global_dim_size // mesh_dim_size
# shrink strides that were scaled up globally
for i in range(len(local_stride)):
if (
i != shard_dim
and local_stride[i] >= local_stride[shard_dim] * mesh_dim_size
):
local_stride[i] = local_stride[i] // mesh_dim_size
elif not isinstance(placement, (Replicate, Partial)):
raise RuntimeError(f"placement type {type(placement)} not supported!")
return local_shape, local_stride
def compute_global_tensor_shape(
shape: torch.Size, mesh: DeviceMesh, placements: Sequence[Placement]
) -> torch.Size:
"""
Compute the global size of a DTensor from the given local tensor shape,
the mesh and placements. Different from `compute_global_tensor_info`,
which assumes sharding is even, this util allgathers local shards' shapes
from all ranks and thus can support uneven sharding.
NOTE: Currently this function only supports 1D mesh.
Args:
shape (:class:`torch.Size`):
Shape of the local tensor
mesh (:class:`DeviceMesh`):
Object which describes the mesh topology
of devices for the DTensor.
placements (Sequence[:class:`Placement`]]):
The attribute of the DTensor that describes its layout
on the mesh topology.
Return:
tensor_shape: Shape of the global DTensor.
"""
if len(placements) != 1:
raise NotImplementedError(
"compute_global_tensor_shape only supports 1 placement for now."
)
if len(placements) != mesh.ndim:
raise RuntimeError(
"Expected one placement per mesh dim, "
f"but found {len(placements)} placements and {mesh.ndim} mesh dims."
)
if isinstance(placements[0], Replicate):
return shape
elif isinstance(placements[0], Shard):
local_shape = torch.tensor(list(shape), device=mesh.device_type)
gathered_shaped_tensors = [
torch.empty_like(local_shape, device=local_shape.device)
for _ in range(mesh.size())
]
funcol.all_gather_inplace(gathered_shaped_tensors, local_shape, mesh)
sharded_dim_sum = 0
shard_dim = placements[0].dim
other_dims = [d for d in range(mesh.ndim) if d != shard_dim]
for shape_tensor in gathered_shaped_tensors:
if not torch.equal(local_shape[other_dims], shape_tensor[other_dims]):
raise RuntimeError(
"Non-sharded dimensions should have identical size across ranks."
)
shape_tensor_list = shape_tensor.tolist()
sharded_dim_sum += shape_tensor_list[shard_dim]
global_shape = list(shape)
global_shape[placements[0].dim] = sharded_dim_sum
return torch.Size(global_shape)
else:
raise NotImplementedError(
f"Placement type {type(placements[0])} not supported."
)
def try_find_mesh_from_args(
op_call: torch._ops.OpOverload, args: Sequence[object]
) -> DeviceMesh:
"""
Find the device mesh object from args.
It returns None if no mesh is found.
NOTE: we can optimize this search if needed
"""
for arg in args:
if isinstance(arg, (dtensor.DTensor, DTensorSpec)):
return arg.device_mesh
elif (
isinstance(arg, (list, tuple))
and len(arg) > 0
and isinstance(arg[0], (dtensor.DTensor, DTensorSpec))
):
return arg[0].device_mesh
raise ValueError(f"Cannot find device mesh from args for op : {op_call}.")
def compute_local_stride(
global_stride: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]
) -> tuple[int, ...]:
"""
Compute the stride of a local tensor shard, given the global stride of the DTensor.
NOTE: Currently this function is assuming the DTensor is evenly shardable.
"""
stride_divisors = [1] * len(global_stride)
for mesh_idx, p in enumerate(placements):
if p.is_shard():
i = cast(Shard, p).dim
# tensor dimension i is sharded on mesh dimension mesh_idx,
# so we need to divide all the strides larger than stride[i]
# (by the submesh size)
for j in range(len(global_stride)):
if global_stride[j] > global_stride[i]:
stride_divisors[j] *= mesh.size(mesh_idx)
return tuple(
global_stride[i] // stride_divisors[i] for i in range(len(global_stride))
)
def normalize_to_torch_size(size) -> torch.Size: # type: ignore[no-untyped-def]
"""
Unify variable types of size argument to torch.Size
Acceptable types include:
int, Sequence[int], Tuple[int], Tuple[Sequence[int]],
or torch.Size
"""
if isinstance(size, torch.Size):
return size
if isinstance(size, int):
torch_size = [size]
elif len(size) == 1 and isinstance(size[0], Sequence):
torch_size = list(size[0])
else:
torch_size = list(size)
return torch.Size(torch_size)
| ExplicitRedistributionContext |
python | ipython__ipython | IPython/testing/plugin/ipdoctest.py | {
"start": 11412,
"end": 11881
} | class ____(doctest.DocTestRunner):
"""Test runner that synchronizes the IPython namespace with test globals.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
return super(IPDocTestRunner,self).run(test,
compileflags,out,clear_globs)
| IPDocTestRunner |
python | huggingface__transformers | src/transformers/models/mt5/modeling_mt5.py | {
"start": 54617,
"end": 58442
} | class ____(MT5PreTrainedModel):
r"""
Examples:
```python
>>> from transformers import MT5EncoderModel, AutoTokenizer
>>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien."
>>> input_ids = tokenizer(article, return_tensors="pt").input_ids
>>> outputs = model(input_ids)
>>> hidden_state = outputs.last_hidden_state
```"""
model_type = "mt5"
config: MT5Config
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
}
# Copied from transformers.models.t5.modeling_t5.T5EncoderModel.__init__ with T5->MT5
def __init__(self, config: MT5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = config
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = MT5Stack(encoder_config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings
def get_input_embeddings(self):
return self.shared
# Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
@auto_docstring
# Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with google-t5/->google/, T5->MT5, t5->mt5
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], BaseModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training).
Example:
```python
>>> from transformers import AutoTokenizer, MT5EncoderModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
>>> model = MT5EncoderModel.from_pretrained("google/mt5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs
@auto_docstring(
custom_intro="""
MT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
tasks.
"""
)
| MT5EncoderModel |
python | django__django | tests/responses/test_fileresponse.py | {
"start": 218,
"end": 302
} | class ____(io.BytesIO):
def seekable(self):
return False
| UnseekableBytesIO |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/graphql.py | {
"start": 5257,
"end": 10984
} | class ____:
# AVERAGE_REVIEWS - optimal number of reviews to fetch inside every pull request.
# If we try to fetch too many (up to 100) we will spend too many scores of query cost.
# https://docs.github.com/en/graphql/overview/resource-limitations#calculating-a-rate-limit-score-before-running-the-call
# If we query too low we would need to make additional sub-queries to fetch the rest of the reviews inside specific pull request.
AVERAGE_REVIEWS = 5
AVERAGE_COMMENTS = 2
AVERAGE_REACTIONS = 2
def get_query_root_repository(self, owner: str, name: str, first: int, after: Optional[str] = None):
"""
Get GraphQL query which allows fetching reactions starting from the repository:
query {
repository {
pull_requests(first: page_size) {
reviews(first: AVERAGE_REVIEWS) {
comments(first: AVERAGE_COMMENTS) {
reactions(first: AVERAGE_REACTIONS) {
}
}
}
}
}
}
"""
op = self._get_operation()
repository = op.repository(owner=owner, name=name)
repository.name()
repository.owner.login()
kwargs = {"first": first}
if after:
kwargs["after"] = after
pull_requests = repository.pull_requests(**kwargs)
pull_requests.page_info.__fields__(has_next_page=True, end_cursor=True)
pull_requests.total_count()
pull_requests.nodes.id(__alias__="node_id")
reviews = self._select_reviews(pull_requests.nodes, first=self.AVERAGE_REVIEWS)
comments = self._select_comments(reviews.nodes, first=self.AVERAGE_COMMENTS)
self._select_reactions(comments.nodes, first=self.AVERAGE_REACTIONS)
return str(op)
def get_query_root_pull_request(self, node_id: str, first: int, after: str):
"""
Get GraphQL query which allows fetching reactions starting from the pull_request:
query {
pull_request {
reviews(first: AVERAGE_REVIEWS) {
comments(first: AVERAGE_COMMENTS) {
reactions(first: AVERAGE_REACTIONS) {
}
}
}
}
}
"""
op = self._get_operation()
pull_request = op.node(id=node_id).__as__(_schema_root.PullRequest)
pull_request.id(__alias__="node_id")
pull_request.repository.name()
pull_request.repository.owner.login()
reviews = self._select_reviews(pull_request, first, after)
comments = self._select_comments(reviews.nodes, first=self.AVERAGE_COMMENTS)
self._select_reactions(comments.nodes, first=self.AVERAGE_REACTIONS)
return str(op)
def get_query_root_review(self, node_id: str, first: int, after: str):
"""
Get GraphQL query which allows fetching reactions starting from the review:
query {
review {
comments(first: AVERAGE_COMMENTS) {
reactions(first: AVERAGE_REACTIONS) {
}
}
}
}
"""
op = self._get_operation()
review = op.node(id=node_id).__as__(_schema_root.PullRequestReview)
review.id(__alias__="node_id")
review.repository.name()
review.repository.owner.login()
comments = self._select_comments(review, first, after)
self._select_reactions(comments.nodes, first=self.AVERAGE_REACTIONS)
return str(op)
def get_query_root_comment(self, node_id: str, first: int, after: str):
"""
Get GraphQL query which allows fetching reactions starting from the comment:
query {
comment {
reactions(first: AVERAGE_REACTIONS) {
}
}
}
"""
op = self._get_operation()
comment = op.node(id=node_id).__as__(_schema_root.PullRequestReviewComment)
comment.id(__alias__="node_id")
comment.database_id(__alias__="id")
comment.repository.name()
comment.repository.owner.login()
self._select_reactions(comment, first, after)
return str(op)
def _select_reactions(self, comment: Selector, first: int, after: Optional[str] = None):
kwargs = {"first": first}
if after:
kwargs["after"] = after
reactions = comment.reactions(**kwargs)
reactions.page_info.__fields__(has_next_page=True, end_cursor=True)
reactions.total_count()
reactions.nodes.__fields__(id="node_id", database_id="id", content=True, created_at="created_at")
select_user_fields(reactions.nodes.user())
return reactions
def _select_comments(self, review: Selector, first: int, after: Optional[str] = None):
kwargs = {"first": first}
if after:
kwargs["after"] = after
comments = review.comments(**kwargs)
comments.page_info.__fields__(has_next_page=True, end_cursor=True)
comments.total_count()
comments.nodes.id(__alias__="node_id")
comments.nodes.database_id(__alias__="id")
return comments
def _select_reviews(self, pull_request: Selector, first: int, after: Optional[str] = None):
kwargs = {"first": first}
if after:
kwargs["after"] = after
reviews = pull_request.reviews(**kwargs)
reviews.page_info.__fields__(has_next_page=True, end_cursor=True)
reviews.total_count()
reviews.nodes.id(__alias__="node_id")
reviews.nodes.database_id(__alias__="id")
return reviews
def _get_operation(self):
return sgqlc.operation.Operation(_schema_root.query_type)
| QueryReactions |
python | jupyterlab__jupyterlab | examples/terminal/main.py | {
"start": 1827,
"end": 2734
} | class ____(LabServerApp):
extension_url = "/example"
default_url = "/example"
app_url = "/example"
name = __name__
# In jupyter-server v2 terminals are an extension.
load_other_extensions = True
app_name = "JupyterLab Example Terminal"
app_settings_dir = os.path.join(HERE, "build", "application_settings")
schemas_dir = os.path.join(HERE, "build", "schemas")
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_handlers(self):
"""Add example handler to Lab Server's handler list."""
self.handlers.append(("/example", ExampleHandler))
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-redis/llama_index/vector_stores/redis/schema.py | {
"start": 783,
"end": 1592
} | class ____(IndexSchema):
"""The default Redis Vector Store Schema."""
def __init__(self, **data) -> None:
index = RedisIndexInfo()
fields: List[Dict[str, Any]] = [
{"type": "tag", "name": NODE_ID_FIELD_NAME, "attrs": {"sortable": False}},
{"type": "tag", "name": DOC_ID_FIELD_NAME, "attrs": {"sortable": False}},
{"type": "text", "name": TEXT_FIELD_NAME, "attrs": {"weight": 1.0}},
{
"type": "vector",
"name": VECTOR_FIELD_NAME,
"attrs": {
"dims": 1536,
"algorithm": "flat",
"distance_metric": "cosine",
},
},
]
super().__init__(index=index.__dict__, fields=fields)
| RedisVectorStoreSchema |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 20273,
"end": 21235
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
kernel_name: str
call_args: tuple[Any, ...]
raw_keys: tuple[Any, ...]
raw_args: tuple[Any, ...]
arg_types: list[str]
triton: bool
triton_meta: dict[str, Any]
device: torch.device
graph_name: str
original_fxnode_name: str
def codegen(self, code: IndentedBuffer) -> None:
self.wrapper._generate_kernel_call_helper(
self.kernel_name,
self.call_args,
triton=self.triton,
arg_types=self.arg_types,
raw_keys=self.raw_keys,
raw_args=self.raw_args,
triton_meta=self.triton_meta,
device=self.device,
graph_name=self.graph_name,
original_fxnode_name=self.original_fxnode_name,
)
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_kernel_call
@dataclasses.dataclass
| KernelCallLine |
python | has2k1__plotnine | plotnine/geoms/geom_tile.py | {
"start": 210,
"end": 1431
} | class ____(geom_rect):
"""
Rectangles specified using a center points
{usage}
Parameters
----------
{common_parameters}
See Also
--------
plotnine.geom_rect
"""
DEFAULT_AES = {
"alpha": 1,
"color": None,
"fill": "#333333",
"linetype": "solid",
"size": 0.1,
"width": None,
"height": None,
}
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
try:
width = data.pop("width")
except KeyError:
width = self.aes_params.get(
"width",
resolution(data["x"], False),
)
try:
height = data.pop("height")
except KeyError:
height = self.aes_params.get(
"height",
resolution(data["y"], False),
)
data["xmin"] = data["x"] - width / 2
data["xmax"] = data["x"] + width / 2
data["ymin"] = data["y"] - height / 2
data["ymax"] = data["y"] + height / 2
return data
| geom_tile |
python | scipy__scipy | scipy/stats/tests/test_hypotests.py | {
"start": 55995,
"end": 61858
} | class ____:
"""Some tests to show that barnard_exact() works correctly."""
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)),
([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)),
([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)),
([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)),
([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)),
([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)),
([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)),
([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)),
([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)),
([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
],
)
def test_precise(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE)
```
"""
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected)
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)),
([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)),
([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)),
([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)),
([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)),
([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)),
([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)),
([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)),
([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)),
([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
],
)
def test_pooled_param(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE)
```
"""
res = barnard_exact(input_sample, pooled=False)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected)
def test_raises(self):
# test we raise an error for wrong input number of nuisances.
error_msg = (
"Number of points `n` must be strictly positive, found 0"
)
with assert_raises(ValueError, match=error_msg):
barnard_exact([[1, 2], [3, 4]], n=0)
# test we raise an error for wrong shape of input.
error_msg = "The input `table` must be of shape \\(2, 2\\)."
with assert_raises(ValueError, match=error_msg):
barnard_exact(np.arange(6).reshape(2, 3))
# Test all values must be positives
error_msg = "All values in `table` must be nonnegative."
with assert_raises(ValueError, match=error_msg):
barnard_exact([[-1, 2], [3, 4]])
# Test value error on wrong alternative param
error_msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
" found .*"
)
with assert_raises(ValueError, match=error_msg):
barnard_exact([[1, 2], [3, 4]], "not-correct")
@pytest.mark.parametrize(
"input_sample,expected",
[
([[0, 0], [4, 3]], (1.0, 0)),
],
)
def test_edge_cases(self, input_sample, expected):
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_equal(pvalue, expected[0])
assert_equal(statistic, expected[1])
@pytest.mark.parametrize(
"input_sample,expected",
[
([[0, 5], [0, 10]], (1.0, np.nan)),
([[5, 0], [10, 0]], (1.0, np.nan)),
],
)
def test_row_or_col_zero(self, input_sample, expected):
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_equal(pvalue, expected[0])
assert_equal(statistic, expected[1])
@pytest.mark.parametrize(
"input_sample,expected",
[
([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
],
)
@pytest.mark.parametrize("alternative", ["greater", "less"])
def test_less_greater(self, input_sample, expected, alternative):
"""
"The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
a$p.value[1]
```
In this test, we are using the "one-sided" return value `a$p.value[1]`
to test our pvalue.
"""
expected_stat, less_pvalue_expect = expected
if alternative == "greater":
input_sample = np.array(input_sample)[:, ::-1]
expected_stat = -expected_stat
res = barnard_exact(input_sample, alternative=alternative)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose(
[statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7
)
| TestBarnardExact |
python | sympy__sympy | sympy/polys/orderings.py | {
"start": 333,
"end": 908
} | class ____:
"""Base class for monomial orderings. """
alias: str | None = None
is_global: bool | None = None
is_default = False
def __repr__(self):
return self.__class__.__name__ + "()"
def __str__(self):
return self.alias
def __call__(self, monomial: tuple[int, ...]) -> Any:
raise NotImplementedError
def __eq__(self, other):
return self.__class__ == other.__class__
def __hash__(self):
return hash(self.__class__)
def __ne__(self, other):
return not (self == other)
| MonomialOrder |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 6035,
"end": 8081
} | class ____(unittest.TestCase):
def test_definition(self):
"""Tests all ways of defining a FuzzyFloat."""
fuzz = fuzzy.FuzzyFloat(2.0, 3.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(2.0 <= res <= 3.0, "value %d is not between 2.0 and 3.0" % res)
fuzz = fuzzy.FuzzyFloat(4.0)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(0.0 <= res <= 4.0, "value %d is not between 0.0 and 4.0" % res)
fuzz = fuzzy.FuzzyDecimal(1.0, 4.0, precision=5)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertTrue(1.0 <= res <= 4.0, "value %d is not between 1.0 and 4.0" % res)
self.assertTrue(res.as_tuple().exponent, -5)
def test_biased(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyFloat(2.0, 8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(10.0, res)
def test_biased_high_only(self):
fake_uniform = lambda low, high: low + high
fuzz = fuzzy.FuzzyFloat(8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.0, res)
def test_default_precision(self):
fake_uniform = lambda low, high: low + high + 0.000000000000011
fuzz = fuzzy.FuzzyFloat(8.0)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.00000000000001, res)
def test_precision(self):
fake_uniform = lambda low, high: low + high + 0.001
fuzz = fuzzy.FuzzyFloat(8.0, precision=4)
with mock.patch('factory.random.randgen.uniform', fake_uniform):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(8.001, res)
| FuzzyFloatTestCase |
python | automl__auto-sklearn | test/test_evaluation/evaluation_util.py | {
"start": 1477,
"end": 8130
} | class ____(unittest.TestCase):
def __init__(self, methodName):
super(BaseEvaluatorTest, self).__init__(methodName)
self.output_directories = []
def _fit(self, evaluator):
return self.__fit(evaluator.fit)
def _partial_fit(self, evaluator, fold):
partial_fit = functools.partial(evaluator.partial_fit, fold=fold)
return self.__fit(partial_fit)
def __fit(self, function_handle):
"""Allow us to catch known and valid exceptions for all evaluate
scripts."""
try:
function_handle()
return True
except KeyError as e:
if (
"Floating-point under-/overflow occurred at epoch" in e.args[0]
or "removed all features" in e.args[0]
or "failed to create intent" in e.args[0]
):
pass
else:
traceback.print_exc()
raise e
except ValueError as e:
if (
"Floating-point under-/overflow occurred at epoch" in e.args[0]
or "removed all features" in e.args[0]
or "failed to create intent" in e.args[0]
):
pass
else:
raise e
except LinAlgError as e:
if "not positive definite, even with jitter" in e.args[0]:
pass
else:
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
pass
elif "divide by zero encountered in divide" in e.args[0]:
pass
else:
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
pass
else:
raise e
def get_multiclass_classification_datamanager():
X_train, Y_train, X_test, Y_test = get_dataset("iris")
indices = list(range(X_train.shape[0]))
np.random.seed(1)
np.random.shuffle(indices)
X_train = X_train[indices]
Y_train = Y_train[indices]
X_test = X_test[25:]
Y_test = Y_test[25:]
D = Dummy()
D.info = {"task": MULTICLASS_CLASSIFICATION, "is_sparse": False, "label_num": 3}
D.data = {
"X_train": X_train,
"Y_train": Y_train,
"X_test": X_test,
"Y_test": Y_test,
}
D.feat_type = {0: "numerical", 1: "Numerical", 2: "numerical", 3: "numerical"}
return D
def get_abalone_datamanager():
# https://www.openml.org/d/183
dataset_name = "abalone"
data = sklearn.datasets.fetch_openml(data_id=183, as_frame=True)
feat_type = {
i: "Categorical" if x.name == "category" else "Numerical"
for i, x in enumerate(data["data"].dtypes)
}
X, y = sklearn.datasets.fetch_openml(data_id=183, return_X_y=True, as_frame=False)
y = preprocessing.LabelEncoder().fit_transform(y)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
X, y, random_state=1
)
D = XYDataManager(
X_train,
y_train,
X_test,
y_test,
MULTICLASS_CLASSIFICATION,
feat_type,
dataset_name,
)
return D
def get_multilabel_classification_datamanager():
X_train, Y_train, X_test, Y_test = get_dataset("iris")
indices = list(range(X_train.shape[0]))
np.random.seed(1)
np.random.shuffle(indices)
X_train = X_train[indices]
Y_train = Y_train[indices]
Y_train = np.array(convert_to_bin(Y_train, 3))
Y_test = np.array(convert_to_bin(Y_test, 3))
X_test = X_test[25:]
Y_test = Y_test[25:]
D = Dummy()
D.info = {"task": MULTILABEL_CLASSIFICATION, "is_sparse": False, "label_num": 3}
D.data = {
"X_train": X_train,
"Y_train": Y_train,
"X_test": X_test,
"Y_test": Y_test,
}
D.feat_type = {0: "numerical", 1: "Numerical", 2: "numerical", 3: "numerical"}
return D
def get_binary_classification_datamanager():
X_train, Y_train, X_test, Y_test = get_dataset("iris")
indices = list(range(X_train.shape[0]))
np.random.seed(1)
np.random.shuffle(indices)
X_train = X_train[indices]
Y_train = Y_train[indices]
eliminate_class_two = Y_train != 2
X_train = X_train[eliminate_class_two]
Y_train = Y_train[eliminate_class_two]
eliminate_class_two = Y_test != 2
X_test = X_test[eliminate_class_two]
Y_test = Y_test[eliminate_class_two]
X_test = X_test[25:]
Y_test = Y_test[25:]
D = Dummy()
D.info = {"task": BINARY_CLASSIFICATION, "is_sparse": False, "label_num": 2}
D.data = {
"X_train": X_train,
"Y_train": Y_train.reshape((-1, 1)),
"X_test": X_test,
"Y_test": Y_test.reshape((-1, 1)),
}
D.feat_type = {0: "numerical", 1: "Numerical", 2: "numerical", 3: "numerical"}
return D
def get_regression_datamanager():
X_train, Y_train, X_test, Y_test = get_dataset("boston")
indices = list(range(X_train.shape[0]))
np.random.seed(1)
np.random.shuffle(indices)
X_train = X_train[indices]
Y_train = Y_train[indices]
X_test = X_test[200:]
Y_test = Y_test[200:]
D = Dummy()
D.info = {"task": REGRESSION, "is_sparse": False, "label_num": 1}
D.data = {
"X_train": X_train,
"Y_train": Y_train.reshape((-1, 1)),
"X_test": X_test,
"Y_test": Y_test.reshape((-1, 1)),
}
D.feat_type = {i: "numerical" for i in range(X_train.shape[1])}
return D
def get_500_classes_datamanager():
weights = ([0.002] * 475) + ([0.001] * 25)
X, Y = sklearn.datasets.make_classification(
n_samples=1000,
n_features=20,
n_classes=500,
n_clusters_per_class=1,
n_informative=15,
n_redundant=5,
n_repeated=0,
weights=weights,
flip_y=0,
class_sep=1.0,
hypercube=True,
shift=None,
scale=1.0,
shuffle=True,
random_state=1,
)
D = Dummy()
D.info = {"task": MULTICLASS_CLASSIFICATION, "is_sparse": False, "label_num": 500}
D.data = {
"X_train": X[:700],
"Y_train": Y[:700],
"X_test": X[710:],
"Y_test": Y[710:],
}
D.feat_type = {i: "numerical" for i in range(20)}
return D
def get_dataset_getters():
return [
get_binary_classification_datamanager,
get_multiclass_classification_datamanager,
get_multilabel_classification_datamanager,
get_500_classes_datamanager,
get_abalone_datamanager,
get_regression_datamanager,
]
| BaseEvaluatorTest |
python | readthedocs__readthedocs.org | readthedocs/core/apps.py | {
"start": 132,
"end": 589
} | class ____(AppConfig):
name = "readthedocs.core"
verbose_name = "Core"
def ready(self):
# Import `readthedocs.core.logs` to set up structlog
import readthedocs.core.logs # noqa
import readthedocs.core.signals # noqa
try:
import readthedocsext.monitoring.metrics.tasks # noqa
except (ModuleNotFoundError, ImportError):
log.info("Metrics tasks could not be imported.")
| CoreAppConfig |
python | openai__openai-python | src/openai/types/responses/response_custom_tool_call_output.py | {
"start": 640,
"end": 1217
} | class ____(BaseModel):
call_id: str
"""The call ID, used to map this custom tool call output to a custom tool call."""
output: Union[str, List[OutputOutputContentList]]
"""
The output from the custom tool call generated by your code. Can be a string or
an list of output content.
"""
type: Literal["custom_tool_call_output"]
"""The type of the custom tool call output. Always `custom_tool_call_output`."""
id: Optional[str] = None
"""The unique ID of the custom tool call output in the OpenAI platform."""
| ResponseCustomToolCallOutput |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_standard_deviation.py | {
"start": 1166,
"end": 3666
} | class ____(ColumnAggregateMetricProvider):
"""MetricProvider Class for Aggregate Standard Deviation metric"""
metric_name = "column.standard_deviation"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
"""Pandas Standard Deviation implementation"""
convert_pandas_series_decimal_to_float_dtype(data=column, inplace=True)
return column.std()
@column_aggregate_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, _dialect, _metrics, **kwargs):
"""SqlAlchemy Standard Deviation implementation"""
if _dialect.name.lower() == GXSqlDialect.MSSQL:
standard_deviation = sa.func.stdev(column)
else:
standard_deviation = sa.func.stddev_samp(column)
return standard_deviation
@column_aggregate_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
"""Spark Standard Deviation implementation"""
return F.stddev_samp(column)
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
types and their respective domains""" # noqa: E501 # FIXME CoP
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(execution_engine, SqlAlchemyExecutionEngine):
dependencies["column.mean"] = MetricConfiguration(
metric_name="column.mean",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=None,
)
dependencies[
f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
] = MetricConfiguration(
metric_name=f"column_values.null.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=None,
)
return dependencies
| ColumnStandardDeviation |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 8045,
"end": 11124
} | class ____(test_util.TensorFlowTestCase):
def testReduceLogSumExp(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np)
y_np = np.log(np.sum(np.exp(x_np)))
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=[0])
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testReductionIndices2(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf = math_ops.reduce_logsumexp(x_np, axis=0)
y_np = np.log(np.sum(np.exp(x_np), axis=0))
self.assertShapeEqual(y_np, y_tf)
y_tf_np = self.evaluate(y_tf)
self.assertAllClose(y_tf_np, y_np)
def testKeepDims(self):
for dtype in [np.float16, np.float32, np.double]:
x_np = np.random.rand(5, 5).astype(dtype)
with test_util.use_gpu():
y_tf_np = math_ops.reduce_logsumexp(x_np, keepdims=True)
self.assertEqual(y_tf_np.shape.rank, x_np.ndim)
y_np = np.log(np.sum(np.exp(x_np), keepdims=True))
self.assertAllClose(y_tf_np, y_np)
def testOverflow(self):
x = [1000, 1001, 1002, 1003]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"overflow encountered in exp"):
out = np.log(np.sum(np.exp(x_np)))
if out == np.inf:
raise RuntimeWarning("overflow encountered in exp")
with test_util.use_gpu():
x_tf = _get_weak_tensor(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testUnderflow(self):
x = [-1000, -1001, -1002, -1003]
for dtype in [np.float32, np.double]:
x_np = np.array(x, dtype=dtype)
max_np = np.max(x_np)
with self.assertRaisesRegex(RuntimeWarning,
"divide by zero encountered in log"):
out = np.log(np.sum(np.exp(x_np)))
if out == -np.inf:
raise RuntimeWarning("divide by zero encountered in log")
with test_util.use_gpu():
x_tf = _get_weak_tensor(x_np, shape=x_np.shape)
y_tf_np = math_ops.reduce_logsumexp(x_tf)
y_np = np.log(np.sum(np.exp(x_np - max_np))) + max_np
self.assertAllClose(y_tf_np, y_np)
def testInfinity(self):
with test_util.use_gpu():
res = math_ops.reduce_logsumexp(-np.inf)
self.assertEqual(-np.inf, self.evaluate(res))
@test_util.run_all_in_graph_and_eager_modes
| LogSumExpTest |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 8458,
"end": 8591
} | class ____(AutomationAction):
"""Pauses a Work Queue"""
type: Literal["pause-automation"] = "pause-automation"
| PauseAutomation |
python | getsentry__sentry | tests/sentry/integrations/data_forwarding/amazon_sqs/test_forwarder.py | {
"start": 486,
"end": 5435
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.data_forwarder = DataForwarder.objects.create(
organization=self.organization,
provider=DataForwarderProviderSlug.SQS,
config={
"queue_url": "https://sqs.us-east-1.amazonaws.com/12345678/myqueue",
"region": "us-east-1",
"access_key": "access-key",
"secret_key": "secret-key",
},
is_enabled=True,
)
self.data_forwarder_project = DataForwarderProject.objects.create(
data_forwarder=self.data_forwarder,
project=self.project,
is_enabled=True,
)
self.forwarder = AmazonSQSForwarder()
@patch("boto3.client")
def test_simple_notification(self, mock_client):
event = self.store_event(
data={
"exception": {"type": "ValueError", "value": "foo bar"},
"user": {"id": "1", "email": "foo@example.com"},
"type": "error",
"metadata": {"type": "ValueError", "value": "foo bar"},
},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
mock_client.assert_called_once_with(
service_name="sqs",
region_name="us-east-1",
aws_access_key_id="access-key",
aws_secret_access_key="secret-key",
)
mock_client.return_value.send_message.assert_called_once_with(
QueueUrl="https://sqs.us-east-1.amazonaws.com/12345678/myqueue",
MessageBody=orjson.dumps(serialize(event), option=orjson.OPT_UTC_Z).decode(),
)
@patch("boto3.client")
def test_token_error(self, mock_client):
mock_client.return_value.send_message.side_effect = ClientError(
{"Error": {"Code": "InvalidClientTokenId", "Message": "Invalid token"}},
"SendMessage",
)
event = self.store_event(
data={"exception": {"type": "ValueError", "value": "foo bar"}, "type": "error"},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
@patch("boto3.client")
def test_message_group_error(self, mock_client):
mock_client.return_value.send_message.side_effect = ClientError(
{
"Error": {
"Code": "MissingParameter",
"Message": "The request must contain the parameter MessageGroupId.",
}
},
"SendMessage",
)
event = self.store_event(
data={"exception": {"type": "ValueError", "value": "foo bar"}, "type": "error"},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
@patch("boto3.client")
def test_pass_message_group_id(self, mock_client):
self.data_forwarder.config["message_group_id"] = "my_group"
self.data_forwarder.save()
event = self.store_event(
data={
"exception": {"type": "ValueError", "value": "foo bar"},
"type": "error",
},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
call_args = mock_client.return_value.send_message.call_args[1]
assert call_args["MessageGroupId"] == "my_group"
assert "MessageDeduplicationId" in call_args
@patch("boto3.client")
def test_use_s3_bucket(self, mock_client):
self.data_forwarder.config["s3_bucket"] = "my_bucket"
self.data_forwarder.save()
event = self.store_event(
data={
"exception": {"type": "ValueError", "value": "foo bar"},
"type": "error",
},
project_id=self.project.id,
)
self.forwarder.post_process(event, self.data_forwarder_project)
mock_client.return_value.put_object.assert_called_once()
put_object_call = mock_client.return_value.put_object.call_args[1]
assert put_object_call["Bucket"] == "my_bucket"
date = event.datetime.strftime("%Y-%m-%d")
expected_key = f"{event.project.slug}/{date}/{event.event_id}"
assert put_object_call["Key"] == expected_key
send_message_call = mock_client.return_value.send_message.call_args[1]
message_body = orjson.loads(send_message_call["MessageBody"])
assert "s3Url" in message_body
assert message_body["eventID"] == event.event_id
# Verify S3 URL uses correct format with s3.{region} not s3-{region}
expected_url = f"https://my_bucket.s3.us-east-1.amazonaws.com/{expected_key}"
assert message_body["s3Url"] == expected_url
assert "s3-" not in message_body["s3Url"]
| AmazonSQSDataForwarderTest |
python | ray-project__ray | python/ray/util/client/common.py | {
"start": 11511,
"end": 14580
} | class ____(ClientStub):
"""A stub created on the Ray Client to represent an actor class.
It is wrapped by ray.remote and can be executed on the cluster.
Args:
actor_cls: The actual class to execute remotely
_name: The original name of the class
_ref: The ClientObjectRef of the pickled `actor_cls`
"""
def __init__(self, actor_cls, options=None):
self.actor_cls = actor_cls
self._lock = threading.Lock()
self._name = actor_cls.__name__
self._init_signature = inspect.Signature(
parameters=extract_signature(actor_cls.__init__, ignore_first=True)
)
self._ref = None
self._client_side_ref = ClientSideRefID.generate_id()
self._options = validate_options(options)
def __call__(self, *args, **kwargs):
raise TypeError(
"Remote actor cannot be instantiated directly. "
f"Use {self._name}.remote() instead"
)
def _ensure_ref(self):
with self._lock:
if self._ref is None:
# As before, set the state of the reference to be an
# in-progress self reference value, which
# the encoding can detect and handle correctly.
self._ref = InProgressSentinel()
data = ray.worker._dumps_from_client(self.actor_cls)
# Check pickled size before sending it to server, which is more
# efficient and can be done synchronously inside remote() call.
check_oversized_function(data, self._name, "actor", None)
self._ref = ray.worker._put_pickled(
data, client_ref_id=self._client_side_ref.id
)
def remote(self, *args, **kwargs) -> "ClientActorHandle":
self._init_signature.bind(*args, **kwargs)
# Actually instantiate the actor
futures = ray.call_remote(self, *args, **kwargs)
assert len(futures) == 1
return ClientActorHandle(ClientActorRef(futures[0]), actor_class=self)
def options(self, **kwargs):
return ActorOptionWrapper(self, kwargs)
def _remote(self, args=None, kwargs=None, **option_args):
if args is None:
args = []
if kwargs is None:
kwargs = {}
return self.options(**option_args).remote(*args, **kwargs)
def __repr__(self):
return "ClientActorClass(%s, %s)" % (self._name, self._ref)
def __getattr__(self, key):
if key not in self.__dict__:
raise AttributeError("Not a class attribute")
raise NotImplementedError("static methods")
def _prepare_client_task(self) -> ray_client_pb2.ClientTask:
self._ensure_ref()
task = ray_client_pb2.ClientTask()
task.type = ray_client_pb2.ClientTask.ACTOR
task.name = self._name
task.payload_id = self._ref.id
set_task_options(task, self._options, "baseline_options")
return task
@staticmethod
def _num_returns() -> int:
return 1
| ClientActorClass |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_reductions.py | {
"start": 5356,
"end": 7922
} | class ____:
@pytest.mark.parametrize(
"raw_data,max_expected,min_expected",
[
(np.arange(5.0), [4], [0]),
(-np.arange(5.0), [0], [-4]),
(np.array([0, 1, 2, np.nan, 4]), [4], [0]),
(np.array([np.nan] * 5), [np.nan], [np.nan]),
(np.array([]), [np.nan], [np.nan]),
],
)
def test_nan_fill_value(self, raw_data, max_expected, min_expected):
arr = SparseArray(raw_data)
max_result = arr.max()
min_result = arr.min()
assert max_result in max_expected
assert min_result in min_expected
max_result = arr.max(skipna=False)
min_result = arr.min(skipna=False)
if np.isnan(raw_data).any():
assert np.isnan(max_result)
assert np.isnan(min_result)
else:
assert max_result in max_expected
assert min_result in min_expected
@pytest.mark.parametrize(
"fill_value,max_expected,min_expected",
[
(100, 100, 0),
(-100, 1, -100),
],
)
def test_fill_value(self, fill_value, max_expected, min_expected):
arr = SparseArray(
np.array([fill_value, 0, 1]), dtype=SparseDtype("int", fill_value)
)
max_result = arr.max()
assert max_result == max_expected
min_result = arr.min()
assert min_result == min_expected
def test_only_fill_value(self):
fv = 100
arr = SparseArray(np.array([fv, fv, fv]), dtype=SparseDtype("int", fv))
assert len(arr._valid_sp_values) == 0
assert arr.max() == fv
assert arr.min() == fv
assert arr.max(skipna=False) == fv
assert arr.min(skipna=False) == fv
@pytest.mark.parametrize("func", ["min", "max"])
@pytest.mark.parametrize("data", [np.array([]), np.array([np.nan, np.nan])])
@pytest.mark.parametrize(
"dtype,expected",
[
(SparseDtype(np.float64, np.nan), np.nan),
(SparseDtype(np.float64, 5.0), np.nan),
(SparseDtype("datetime64[ns]", NaT), NaT),
(SparseDtype("datetime64[ns]", Timestamp("2018-05-05")), NaT),
],
)
def test_na_value_if_no_valid_values(self, func, data, dtype, expected):
arr = SparseArray(data, dtype=dtype)
result = getattr(arr, func)()
if expected is NaT:
# TODO: pin down whether we wrap datetime64("NaT")
assert result is NaT or np.isnat(result)
else:
assert np.isnan(result)
| TestMinMax |
python | numpy__numpy | numpy/_core/tests/test_indexing.py | {
"start": 29658,
"end": 48051
} | class ____:
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def _create_array(self):
return np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
def _create_complex_indices(self):
return ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1] * 31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing this
is generally not the case.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for ie. np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp)
except ValueError:
raise IndexError
in_indices[i] = indx
elif indx.dtype.kind not in 'bi':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
# we can't take more dimensions then we have, not even for 0-d
# arrays. since a[()] makes sense, but not a[(),]. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:])
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax + indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax + indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0] * indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape(arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax + indx.ndim]),)
+ arr.shape[ax + indx.ndim:])
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
# Note that originally this is could be interpreted as
# integer in the full integer special case.
raise IndexError
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
elif indx.ndim == 0 and not (
-arr.shape[ax] <= indx < arr.shape[ax]
):
raise IndexError
if indx.ndim == 0:
# The index is a scalar. This used to be two fold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape(arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):])
# Check if broadcasting works
res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
if np.prod(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape(arr.shape[:ax]
+ mi.shape
+ arr.shape[ax + 1:])
except ValueError:
# too many dimensions, probably
raise IndexError
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
if HAS_REFCOUNT:
startcount = sys.getrefcount(arr)
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
# Check if we got a view, unless its a 0-sized or 0-d array.
# (then its not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), startcount + 1)
else:
assert_equal(sys.getrefcount(arr), startcount)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
a = self._create_array()
self._check_multi_index(
a, (np.zeros_like(a, dtype=bool),))
self._check_multi_index(
a, (np.zeros_like(a, dtype=bool)[..., 0],))
self._check_multi_index(
a, (np.zeros_like(a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
a = self._create_array()
b = np.empty((3, 0, 5, 6))
complex_indices = self._create_complex_indices()
simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
fill_indices = [slice(None, None), 0]
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [fill_indices, complex_indices,
fill_indices, fill_indices]
tocheck[simple_pos] = simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(a, index)
self._check_multi_index(b, index)
# Check very simple item getting:
self._check_multi_index(a, (0, 0, 0, 0))
self._check_multi_index(b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
complex_indices = self._create_complex_indices()
for index in complex_indices:
self._check_single_index(a, index)
| TestMultiIndexingAutomated |
python | langchain-ai__langchain | libs/core/langchain_core/language_models/base.py | {
"start": 3072,
"end": 11598
} | class ____(
RunnableSerializable[LanguageModelInput, LanguageModelOutputVar], ABC
):
"""Abstract base class for interfacing with language models.
All language model wrappers inherited from `BaseLanguageModel`.
"""
cache: BaseCache | bool | None = Field(default=None, exclude=True)
"""Whether to cache the response.
* If `True`, will use the global cache.
* If `False`, will not use a cache
* If `None`, will use the global cache if it's set, otherwise no cache.
* If instance of `BaseCache`, will use the provided cache.
Caching is not currently supported for streaming methods of models.
"""
verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to add to the run trace."""
tags: list[str] | None = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
metadata: dict[str, Any] | None = Field(default=None, exclude=True)
"""Metadata to add to the run trace."""
custom_get_token_ids: Callable[[str], list[int]] | None = Field(
default=None, exclude=True
)
"""Optional encoder to use for counting tokens."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@field_validator("verbose", mode="before")
def set_verbose(cls, verbose: bool | None) -> bool: # noqa: FBT001
"""If verbose is `None`, set it.
This allows users to pass in `None` as verbose to access the global setting.
Args:
verbose: The verbosity setting to use.
Returns:
The verbosity setting to use.
"""
if verbose is None:
return _get_verbosity()
return verbose
@property
@override
def InputType(self) -> TypeAlias:
"""Get the input type for this `Runnable`."""
# This is a version of LanguageModelInput which replaces the abstract
# base class BaseMessage with a union of its subclasses, which makes
# for a much better schema.
return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
@abstractmethod
def generate_prompt(
self,
prompts: list[PromptValue],
stop: list[str] | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Pass a sequence of prompts to the model and return model generations.
This method should make use of batched calls for models that expose a batched
API.
Use this method when you want to:
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of `PromptValue` objects.
A `PromptValue` is an object that can be converted to match the format
of any language model (string for pure text generation models and
`BaseMessage` objects for chat models).
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.
"""
@abstractmethod
async def agenerate_prompt(
self,
prompts: list[PromptValue],
stop: list[str] | None = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Asynchronously pass a sequence of prompts and return model generations.
This method should make use of batched calls for models that expose a batched
API.
Use this method when you want to:
1. Take advantage of batched calls,
2. Need more output from the model than just the top generated value,
3. Are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of `PromptValue` objects.
A `PromptValue` is an object that can be converted to match the format
of any language model (string for pure text generation models and
`BaseMessage` objects for chat models).
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
callbacks: `Callbacks` to pass through.
Used for executing additional functionality, such as logging or
streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments.
These are usually passed to the model provider API call.
Returns:
An `LLMResult`, which contains a list of candidate `Generation` objects for
each input prompt and additional model provider-specific output.
"""
def with_structured_output(
self, schema: dict | type, **kwargs: Any
) -> Runnable[LanguageModelInput, dict | BaseModel]:
"""Not implemented on this class."""
# Implement this on child class if there is a way of steering the model to
# generate responses that match a given schema.
raise NotImplementedError
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return self.lc_attributes
def get_token_ids(self, text: str) -> list[int]:
"""Return the ordered IDs of the tokens in a text.
Args:
text: The string input to tokenize.
Returns:
A list of IDs corresponding to the tokens in the text, in order they occur
in the text.
"""
if self.custom_get_token_ids is not None:
return self.custom_get_token_ids(text)
return _get_token_ids_default_method(text)
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.
Useful for checking if an input fits in a model's context window.
This should be overridden by model-specific implementations to provide accurate
token counts via model-specific tokenizers.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
return len(self.get_token_ids(text))
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Sequence | None = None,
) -> int:
"""Get the number of tokens in the messages.
Useful for checking if an input fits in a model's context window.
This should be overridden by model-specific implementations to provide accurate
token counts via model-specific tokenizers.
!!! note
* The base implementation of `get_num_tokens_from_messages` ignores tool
schemas.
* The base implementation of `get_num_tokens_from_messages` adds additional
prefixes to messages in represent user roles, which will add to the
overall token count. Model-specific implementations may choose to
handle this differently.
Args:
messages: The message inputs to tokenize.
tools: If provided, sequence of dict, `BaseModel`, function, or
`BaseTool` objects to be converted to tool schemas.
Returns:
The sum of the number of tokens across the messages.
"""
if tools is not None:
warnings.warn(
"Counting tokens in tool schemas is not yet supported. Ignoring tools.",
stacklevel=2,
)
return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
| BaseLanguageModel |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py | {
"start": 40806,
"end": 43014
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2_5OmniToken2WavModel`].
It is used to instantiate the Qwen2.5-Omni-Token2Wav model which combines a Diffusion Transformer (DiT) for mel-spectrogram generation with a BigVGAN model for waveform synthesis. The configuration contains sub-configurations for both components.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
dit_config ([`DiT_Args`], *optional*):
Configuration class for the Diffusion Transformer (DiT) module responsible for generating mel-spectrograms.
bigvgan_config ([`BigVGAN_Args`], *optional*):
Configuration class for the BigVGAN module responsible for converting mel-spectrograms to waveforms.
Example:
```python
>>> from transformers import Qwen2_5OmniToken2WavModel, DiT_Args, BigVGAN_Args
>>> # Initialize DiT configuration
>>> dit_config = DiT_Args(
... dim=1024,
... depth=22,
... heads=16,
... ff_mult=2
... )
>>> # Initialize BigVGAN configuration
>>> bigvgan_config = BigVGAN_Args(
... mel_dim=80,
... upsample_rates=[5,3,2,2,2,2]
... )
>>> # Initialize main configuration
>>> config = Qwen2_5OmniToken2WavConfig(dit_config, bigvgan_config)
>>> # Initialize model with config
>>> model = Qwen2_5OmniToken2Wav(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "qwen2_5_omni_token2wav"
sub_configs = {
"dit_config": Qwen2_5OmniDiTConfig,
"bigvgan_config": Qwen2_5OmniBigVGANConfig,
}
def __init__(self, dit_config=None, bigvgan_config=None, **kwargs):
if dit_config is None:
dit_config = {}
if bigvgan_config is None:
bigvgan_config = {}
self.dit_config = Qwen2_5OmniDiTConfig(**dit_config)
self.bigvgan_config = Qwen2_5OmniBigVGANConfig(**bigvgan_config)
super().__init__(**kwargs)
| Qwen2_5OmniToken2WavConfig |
python | FactoryBoy__factory_boy | factory/builder.py | {
"start": 7841,
"end": 10091
} | class ____:
"""A factory instantiation step.
Attributes:
- parent: the parent StepBuilder, or None for the root step
- extras: the passed-in kwargs for this branch
- factory: the factory class being built
- strategy: the strategy to use
"""
def __init__(self, factory_meta, extras, strategy):
self.factory_meta = factory_meta
self.strategy = strategy
self.extras = extras
self.force_init_sequence = extras.pop('__sequence', None)
def build(self, parent_step=None, force_sequence=None):
"""Build a factory instance."""
# TODO: Handle "batch build" natively
pre, post = parse_declarations(
self.extras,
base_pre=self.factory_meta.pre_declarations,
base_post=self.factory_meta.post_declarations,
)
if force_sequence is not None:
sequence = force_sequence
elif self.force_init_sequence is not None:
sequence = self.force_init_sequence
else:
sequence = self.factory_meta.next_sequence()
step = BuildStep(
builder=self,
sequence=sequence,
parent_step=parent_step,
)
step.resolve(pre)
args, kwargs = self.factory_meta.prepare_arguments(step.attributes)
instance = self.factory_meta.instantiate(
step=step,
args=args,
kwargs=kwargs,
)
postgen_results = {}
for declaration_name in post.sorted():
declaration = post[declaration_name]
postgen_results[declaration_name] = declaration.declaration.evaluate_post(
instance=instance,
step=step,
overrides=declaration.context,
)
self.factory_meta.use_postgeneration_results(
instance=instance,
step=step,
results=postgen_results,
)
return instance
def recurse(self, factory_meta, extras):
"""Recurse into a sub-factory call."""
return self.__class__(factory_meta, extras, strategy=self.strategy)
def __repr__(self):
return f"<StepBuilder({self.factory_meta!r}, strategy={self.strategy!r})>"
| StepBuilder |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 158569,
"end": 159572
} | class ____(WrapHigherOrderVariable):
supports_input_mutation = False
supports_aliasing = False
def python_type(self):
return type(self.value)
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
(
p_args,
p_kwargs,
example_value,
body_r,
_,
_,
body_graph_output_vts,
) = self.create_wrapped_node(
tx, args[0], args[1:], {}, self.value._name, subgraph_name="subgraph"
)
assert len(p_kwargs) == 0
p_kwargs = {key: value.as_proxy() for key, value in kwargs.items()}
return _call_function_with_auto_output_flattening(
tx,
self.value,
p_args,
p_kwargs,
example_value,
body_r,
body_graph_output_vts,
)
| BaseHOPVariable |
python | doocs__leetcode | solution/3600-3699/3631.Sort Threats by Severity and Exploitability/Solution.py | {
"start": 0,
"end": 174
} | class ____:
def sortThreats(self, threats: List[List[int]]) -> List[List[int]]:
threats.sort(key=lambda x: (-(x[1] * 2 + x[2]), x[0]))
return threats
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_pipeline.py | {
"start": 2917,
"end": 4190
} | class ____:
@mock.patch.object(SageMakerHook, "stop_pipeline")
def test_execute(self, stop_pipeline):
op = SageMakerStopPipelineOperator(
task_id="test_sagemaker_operator", pipeline_exec_arn="pipeline_arn"
)
op.execute({})
stop_pipeline.assert_called_once_with(
pipeline_exec_arn="pipeline_arn",
fail_if_not_running=False,
)
@mock.patch.object(SageMakerHook, "stop_pipeline")
def test_defer(self, stop_mock: MagicMock):
stop_mock.return_value = "Stopping"
op = SageMakerStopPipelineOperator(
task_id="test_sagemaker_operator",
pipeline_exec_arn="my_pipeline_arn",
deferrable=True,
)
with pytest.raises(TaskDeferred) as defer:
op.execute({})
assert isinstance(defer.value.trigger, SageMakerPipelineTrigger)
assert defer.value.trigger.waiter_type == SageMakerPipelineTrigger.Type.STOPPED
def test_template_fields(self):
operator = SageMakerStopPipelineOperator(
task_id="test_sagemaker_operator",
pipeline_exec_arn="my_pipeline_arn",
deferrable=True,
)
validate_template_fields(operator)
| TestSageMakerStopPipelineOperator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/collections.py | {
"start": 47821,
"end": 47913
} | class ____(List[_T]):
"""An instrumented version of the built-in list."""
| InstrumentedList |
python | Textualize__textual | tests/css/test_initial.py | {
"start": 436,
"end": 2380
} | class ____(CustomWidget2):
pass
async def test_initial_default():
class InitialApp(App):
def compose(self) -> ComposeResult:
yield Base(id="base")
yield CustomWidget1(id="custom1")
yield CustomWidget2(id="custom2")
app = InitialApp()
async with app.run_test():
base = app.query_one("#base", Base)
custom1 = app.query_one("#custom1", CustomWidget1)
custom2 = app.query_one("#custom2", CustomWidget2)
# No background set on base
default_background = base.styles.background
assert default_background == Color.parse("rgba(0,0,0,0)")
# Customized background value, should be red
assert custom1.styles.background == Color.parse("red")
# Background has default value
assert custom2.styles.background == default_background
async def test_initial():
class InitialApp(App):
CSS = """
CustomWidget1 {
color: red;
}
CustomWidget2 {
color: initial;
}
CustomWidget3 {
color: blue;
}
"""
def compose(self) -> ComposeResult:
yield Base(id="base")
yield CustomWidget1(id="custom1")
yield CustomWidget2(id="custom2")
yield CustomWidget3(id="custom3")
app = InitialApp()
async with app.run_test():
base = app.query_one("#base")
custom1 = app.query_one("#custom1")
custom2 = app.query_one("#custom2")
custom3 = app.query_one("#custom3")
# Default color
assert base.styles.color == Color.parse("magenta")
# Explicitly set to red
assert custom1.styles.color == Color.parse("red")
# Set to initial, should be same as base
assert custom2.styles.color == Color.parse("magenta")
# Set to blue
assert custom3.styles.color == Color.parse("blue")
| CustomWidget3 |
python | fastai__fastai | fastai/test_utils.py | {
"start": 1778,
"end": 6093
} | class ____(Callback):
"Callback that prints the name of each event called"
def __call__(self, event_name):
print(event_name)
super().__call__(event_name)
# %% ../nbs/97_test_utils.ipynb 9
def get_env(name):
"Return env var value if it's defined and not an empty string, or return Unknown"
res = os.environ.get(name,'')
return res if len(res) else "Unknown"
# %% ../nbs/97_test_utils.ipynb 10
def try_import(module):
"Try to import `module`. Returns module's object on success, None on failure"
try: return importlib.import_module(module)
except: return None
# %% ../nbs/97_test_utils.ipynb 11
def nvidia_smi(cmd = "nvidia-smi"):
try: res = run(cmd)
except OSError as e: return None
return res
# %% ../nbs/97_test_utils.ipynb 13
def nvidia_mem():
try: mem = run("nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader")
except: return None
return mem.strip().split('\n')
# %% ../nbs/97_test_utils.ipynb 15
def show_install(show_nvidia_smi:bool=False):
"Print user's setup information"
import fastai, platform, fastprogress, fastcore
rep = []
opt_mods = []
rep.append(["=== Software ===", None])
rep.append(["python", platform.python_version()])
rep.append(["fastai", fastai.__version__])
rep.append(["fastcore", fastcore.__version__])
rep.append(["fastprogress", fastprogress.__version__])
rep.append(["torch", torch.__version__])
# nvidia-smi
smi = nvidia_smi()
if smi:
match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
if match: rep.append(["nvidia driver", match[0]])
available = "available" if torch.cuda.is_available() else "**Not available** "
rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
# no point reporting on cudnn if cuda is not available, as it
# seems to be enabled at times even on cpu-only setups
if torch.cuda.is_available():
enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
rep.append(["\n=== Hardware ===", None])
gpu_total_mem = []
nvidia_gpu_cnt = 0
if smi:
mem = nvidia_mem()
nvidia_gpu_cnt = len(ifnone(mem, []))
if nvidia_gpu_cnt: rep.append(["nvidia gpus", nvidia_gpu_cnt])
torch_gpu_cnt = torch.cuda.device_count()
if torch_gpu_cnt:
rep.append(["torch devices", torch_gpu_cnt])
# information for each gpu
for i in range(torch_gpu_cnt):
rep.append([f" - gpu{i}", (f"{gpu_total_mem[i]}MB | " if gpu_total_mem else "") + torch.cuda.get_device_name(i)])
else:
if nvidia_gpu_cnt:
rep.append([f"Have {nvidia_gpu_cnt} GPU(s), but torch can't use them (check nvidia driver)", None])
else:
rep.append([f"No GPUs available", None])
rep.append(["\n=== Environment ===", None])
rep.append(["platform", platform.platform()])
if platform.system() == 'Linux':
distro = try_import('distro')
if distro:
# full distro info
rep.append(["distro", ' '.join(distro.linux_distribution())])
else:
opt_mods.append('distro');
# partial distro info
rep.append(["distro", platform.uname().version])
rep.append(["conda env", get_env('CONDA_DEFAULT_ENV')])
rep.append(["python", sys.executable])
rep.append(["sys.path", "\n".join(sys.path)])
print("\n\n```text")
keylen = max([len(e[0]) for e in rep if e[1] is not None])
for e in rep:
print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
if smi:
if show_nvidia_smi: print(f"\n{smi}")
else:
if torch_gpu_cnt: print("no nvidia-smi is found")
else: print("no supported gpus found on this system")
print("```\n")
print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
if opt_mods:
print("Optional package(s) to enhance the diagnostics can be installed with:")
print(f"pip install {' '.join(opt_mods)}")
print("Once installed, re-run this utility to get the additional information")
| VerboseCallback |
python | django__django | tests/i18n/test_compilation.py | {
"start": 12531,
"end": 12744
} | class ____(MessageCompilationTests):
LOCALE = "ru"
PROJECT_MO_FILE = "locale/%s/LC_MESSAGES/django.mo" % LOCALE
APP_MO_FILE = "app_with_locale/locale/%s/LC_MESSAGES/django.mo" % LOCALE
| ProjectAndAppTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 674533,
"end": 674929
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("IpAllowListEntry", graphql_name="node")
"""The item at the end of the edge."""
| IpAllowListEntryEdge |
python | catalyst-team__catalyst | tests/benchmarks/test_benchmark.py | {
"start": 624,
"end": 7999
} | class ____(dl.Runner):
def get_loaders(self) -> "OrderedDict[str, DataLoader]":
return {
"train": DataLoader(
MNIST(DATA_ROOT, train=True, download=True),
batch_size=128,
num_workers=1,
)
}
def get_model(self) -> TorchModel:
return nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=28 * 28, out_features=128),
nn.BatchNorm1d(128),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(in_features=128, out_features=10),
)
def get_criterion(self) -> TorchCriterion:
return nn.CrossEntropyLoss()
def get_optimizer(self, model: TorchModel) -> TorchOptimizer:
return torch.optim.Adam(model.parameters(), lr=0.02)
def handle_batch(self, batch: Mapping[str, Any]) -> None:
raise NotImplementedError()
def _get_used_memory():
if torch.cuda.is_available():
torch.cuda.synchronize()
used_memory = torch.cuda.max_memory_allocated()
else:
used_memory = np.nan
return used_memory
def run_pytorch(
irunner: dl.IRunner, idx: int, device: str = "cuda", num_epochs: int = 10
):
device = torch.device(device)
utils.set_global_seed(idx)
loader = irunner.get_loaders()["train"]
model = irunner.get_model().to(device)
criterion = irunner.get_criterion()
optimizer = irunner.get_optimizer(model)
epoch_scores = []
epoch_losses = []
for i in range(num_epochs):
epoch_score = 0
epoch_loss = 0
for features, targets in loader:
features = features.to(device)
targets = targets.to(device)
logits = model(features)
loss = criterion(logits, targets)
epoch_loss += loss.item()
pred = logits.argmax(dim=1, keepdim=True)
epoch_score += pred.eq(targets.view_as(pred)).sum().item()
self.engine.backward(loss)
optimizer.step()
optimizer.zero_grad()
epoch_score /= len(loader.dataset)
epoch_loss /= len(loader)
print(f"Epoch {i} \t Score: {epoch_score} \t Loss: {epoch_loss}")
epoch_scores.append(epoch_score)
epoch_losses.append(epoch_loss)
return epoch_scores[-1], epoch_losses[-1], _get_used_memory()
def run_catalyst(
irunner: dl.IRunner, idx: int, device: str = "cuda", num_epochs: int = 10
):
utils.set_global_seed(idx)
loader = irunner.get_loaders()["train"]
model = irunner.get_model().to(device)
criterion = irunner.get_criterion()
optimizer = irunner.get_optimizer(model)
runner = dl.SupervisedRunner()
runner.train(
engine=dl.GPUEngine() if device == "cuda" else dl.CPUEngine(),
model=model,
criterion=criterion,
optimizer=optimizer,
loaders={"train": loader},
num_epochs=num_epochs,
verbose=False,
callbacks=[
dl.AccuracyCallback(
input_key=runner._output_key,
target_key=runner._target_key,
topk=(1,),
)
],
)
return (
runner.epoch_metrics["train"]["accuracy01"],
runner.epoch_metrics["train"]["loss"],
_get_used_memory(),
)
def score_runs(
irunner: dl.IRunner,
mode: RunMode,
device: str,
num_runs: int = 10,
num_epochs: int = 10,
):
hist_scores = []
hist_losses = []
hist_time = []
hist_memory = []
torch.backends.cudnn.deterministic = True
for i in tqdm(range(num_runs), desc=f"{mode} with {irunner.__class__.__name__}"):
gc.collect()
if device == "cuda":
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_cached()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_accumulated_memory_stats()
torch.cuda.reset_peak_memory_stats()
time.sleep(1)
time_start = time.perf_counter()
_run_fn = run_catalyst if mode == RunMode.catalyst else run_pytorch
final_score, final_loss, used_memory = _run_fn(
irunner, idx=i, device=device, num_epochs=num_epochs
)
time_end = time.perf_counter()
hist_scores.append(final_score)
hist_losses.append(final_loss)
hist_time.append(time_end - time_start)
hist_memory.append(used_memory)
return {
"scores": hist_scores,
"losses": hist_losses,
"time": hist_time,
"memory": hist_memory,
}
def assert_relative_equal(
catalyst_values, torch_values, max_diff: float, norm: float = 1
):
diffs = np.asarray(catalyst_values) - np.mean(torch_values)
diffs = diffs / norm
diffs = diffs / np.mean(torch_values)
assert (
np.mean(diffs) < max_diff
), f"Catalyst diff {diffs} worse than PyTorch (threshold {max_diff})"
def assert_absolute_equal(
catalyst_values, torch_values, max_diff: float, norm: float = 1
):
diffs = np.asarray(catalyst_values) - np.mean(torch_values)
diffs = diffs / norm
assert (
np.mean(diffs) < max_diff
), f"Catalyst {diffs} worse than PyTorch (threshold {max_diff})"
BENCHMARKS = [(TestMnistRunner, 4, "cpu", 3, 2, 0.15, 0.001)]
if torch.cuda.is_available():
BENCHMARKS.append((TestMnistRunner, 4, "cuda", 3, 2, 0.15, 0.001))
@pytest.mark.parametrize(
"irunner,num_epochs,device,num_runs,precision,max_diff_time,max_diff_memory",
BENCHMARKS,
)
@pytest.mark.skipif(~IS_BENCHMARK_REQUIRED, reason="Benchmark is not required.")
def test_benchmark(
tmpdir,
irunner: dl.IRunner,
device: str,
num_epochs: int,
num_runs: int,
precision: int,
max_diff_time: float,
max_diff_memory: float,
):
irunner = irunner()
# prepare data
_ = irunner.get_loaders()
# score runs
pytorch = score_runs(
irunner,
mode=RunMode.pytorch,
device=device,
num_epochs=num_epochs,
num_runs=num_runs,
)
catalyst = score_runs(
irunner,
mode=RunMode.catalyst,
device=device,
num_epochs=num_epochs,
num_runs=num_runs,
)
# check performance
print(
"Scores are for... \n "
f"PyTorch: {pytorch['scores']} \n Catalyst: {catalyst['scores']}"
)
for catalyst_, pytorch_ in zip(catalyst["scores"], pytorch["scores"]):
np.testing.assert_almost_equal(catalyst_, pytorch_, precision)
# check loss
print(
"Losses are for... \n "
f"PyTorch: {pytorch['losses']} \n Catalyst: {catalyst['losses']}"
)
for catalyst_, pytorch_ in zip(catalyst["losses"], pytorch["losses"]):
np.testing.assert_almost_equal(catalyst_, pytorch_, precision)
# check time
print(
f"Times are for... \n PyTorch: {pytorch['time']} \n Catalyst: {catalyst['time']}"
)
assert_absolute_equal(
catalyst["time"],
pytorch["time"],
norm=num_epochs,
max_diff=max_diff_time,
)
# check memory
if torch.cuda.is_available():
print(
"Memory usages are for... \n "
f"PyTorch: {pytorch['memory']} \n Catalyst: {catalyst['memory']}"
)
assert_relative_equal(
catalyst["memory"], pytorch["memory"], max_diff=max_diff_memory
)
| TestMnistRunner |
python | huggingface__transformers | src/transformers/models/dpt/image_processing_dpt.py | {
"start": 1749,
"end": 4183
} | class ____(ImagesKwargs, total=False):
"""
ensure_multiple_of (`int`, *optional*, defaults to 1):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
by `ensure_multiple_of` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
be overridden by `keep_aspect_ratio` in `preprocess`.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
"""
ensure_multiple_of: int
size_divisor: int
keep_aspect_ratio: bool
do_reduce_labels: bool
def get_resize_output_image_size(
input_image: np.ndarray,
output_size: Union[int, Iterable[int]],
keep_aspect_ratio: bool,
multiple: int,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
x = round(val / multiple) * multiple
if max_val is not None and x > max_val:
x = math.floor(val / multiple) * multiple
if x < min_val:
x = math.ceil(val / multiple) * multiple
return x
output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
input_height, input_width = get_image_size(input_image, input_data_format)
output_height, output_width = output_size
# determine new height and width
scale_height = output_height / input_height
scale_width = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)
return (new_height, new_width)
@requires(backends=("vision",))
| DPTImageProcessorKwargs |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/records/profiles_record_builder.py | {
"start": 156,
"end": 363
} | class ____(RecordBuilder):
@classmethod
def profiles_record(cls) -> "ProfilesRecordBuilder":
return cls(find_template("profiles", __file__)[0], FieldPath("profileId"), None)
| ProfilesRecordBuilder |
python | pytorch__pytorch | test/package/package_a/subpackage.py | {
"start": 34,
"end": 119
} | class ____:
pass
def leaf_function(a, b):
return a + b
| PackageASubpackageObject |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 15221,
"end": 15537
} | class ____(PrefilterChecker):
priority = Integer(250).tag(config=True)
def check(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
if isinstance(obj, Macro):
return self.prefilter_manager.get_handler_by_name('macro')
else:
return None
| MacroChecker |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 5309,
"end": 5470
} | class ____(OpcodeWithArg):
_FLAGS = HAS_ARGUMENT
__slots__ = ()
def __str__(self):
return self.basic_str() + " " + str(self.arg.value)
| LOAD_FOLDED_CONST |
python | kamyu104__LeetCode-Solutions | Python/counting-elements.py | {
"start": 29,
"end": 272
} | class ____(object):
def countElements(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
lookup = set(arr)
return sum(1 for x in arr if x+1 in lookup)
# Time: O(nlogn)
# Space: O(1)
| Solution |
python | streamlit__streamlit | lib/tests/streamlit/web/server/oidc_mixin_test.py | {
"start": 884,
"end": 5579
} | class ____(unittest.TestCase):
@patch(
"streamlit.web.server.oidc_mixin.OAuth2Mixin.load_server_metadata",
MagicMock(
return_value={
"authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth",
}
),
)
def test_load_server_metadata_empty(self):
"""Test load_server_metadata with empty dict."""
app = TornadoOAuth2App(MagicMock())
result = app.load_server_metadata()
assert result == {
"authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth"
}
assert app.client_kwargs == {}
@patch(
"streamlit.web.server.oidc_mixin.OAuth2Mixin.load_server_metadata",
MagicMock(
return_value={
"authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth",
"code_challenge_methods_supported": ["plain", "S256"],
}
),
)
def test_load_server_metadata_s256_plain(self):
"""Test load_server_metadata with S256 and plain code challenge methods."""
app = TornadoOAuth2App(MagicMock())
result = app.load_server_metadata()
assert result == {
"authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth",
"code_challenge_methods_supported": ["plain", "S256"],
}
assert app.client_kwargs == {"code_challenge_method": "S256"}
def test_authorize_redirect(self):
"""Test authorize_redirect."""
app = TornadoOAuth2App(MagicMock())
app.create_authorization_url = MagicMock(
return_value={"url": "https://example.com", "state": "some_state"}
)
request_handler = MagicMock()
app.authorize_redirect(request_handler)
request_handler.redirect.assert_called_once_with(
"https://example.com", status=302
)
app.framework.set_state_data.assert_called_once_with(
None, "some_state", {"redirect_uri": None, "url": "https://example.com"}
)
def test_authorize_redirect_error_no_state(self):
"""Test authorize_redirect without state raises error."""
app = TornadoOAuth2App(MagicMock())
app.create_authorization_url = MagicMock(
return_value={"url": "https://example.com"}
)
request_handler = MagicMock()
with pytest.raises(RuntimeError) as e:
app.authorize_redirect(request_handler)
assert e.match("Missing state value")
def test_authorize_access_token_error(self):
"""Test authorize_access_token with error."""
app = TornadoOAuth2App(MagicMock())
with pytest.raises(OAuthError) as e:
app.authorize_access_token(
MagicMock(
get_argument=lambda x, *args: "some_error" if x == "error" else None
)
)
assert e.match("some_error")
@patch(
"streamlit.web.server.oidc_mixin.TornadoOAuth2App.client_cls.request",
MagicMock(
return_value=MagicMock(
json=MagicMock(
return_value={
"access_token": "payload",
"id_token": "id_token_payload",
}
),
status_code=200,
)
),
)
def test_authorize_access_token_success(self):
"""Test authorize_access_token with success."""
app = TornadoOAuth2App(
MagicMock(
get_state_data=MagicMock(
return_value={
"redirect_uri": "http://localhost:8501/oauth2callback",
"nonce": "some_nonce",
}
)
)
)
app.parse_id_token = MagicMock(
return_value={"email": "authed_user@example.com"}
)
def get_argument_mock(name: str, *args):
if name == "code":
return "some_code"
if name == "state":
return "some_state"
return None
token = app.authorize_access_token(MagicMock(get_argument=get_argument_mock))
app.parse_id_token.assert_called_once_with(
{
"access_token": "payload",
"id_token": "id_token_payload",
},
nonce="some_nonce",
claims_options=None,
)
assert token == {
"access_token": "payload",
"id_token": "id_token_payload",
"userinfo": {
"email": "authed_user@example.com",
},
}
| TornadoOAuth2AppTest |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_emails.py | {
"start": 1072,
"end": 2565
} | class ____(serializers.Serializer[UserEmail]):
email = AllowedEmailField(required=True, help_text="The email address to add/remove.")
def add_email_signed(email: str, user: User) -> None:
"""New path for adding email - uses signed URLs"""
EMAIL_CONFIRMATION_SALT = options.get("user-settings.signed-url-confirmation-emails-salt")
if email is None:
raise InvalidEmailError
if UserEmail.objects.filter(user=user, email__iexact=email.lower()).exists():
raise DuplicateEmailError
# Generate signed data for verification URL
signed_data = sign(
user_id=user.id,
email=email,
salt=EMAIL_CONFIRMATION_SALT,
)
# Send verification email with signed URL
user.send_signed_url_confirm_email_singular(email, signed_data)
def add_email(email: str, user: User) -> UserEmail:
"""
Adds an email to user account
Can be either primary or secondary
"""
# Bad email
if email is None:
raise InvalidEmailError
if UserEmail.objects.filter(user=user, email__iexact=email.lower()).exists():
raise DuplicateEmailError
try:
with transaction.atomic(using=router.db_for_write(UserEmail)):
new_email = UserEmail.objects.create(user=user, email=email)
except IntegrityError:
raise DuplicateEmailError
new_email.set_hash()
new_email.save()
user.send_confirm_email_singular(new_email)
return new_email
@control_silo_endpoint
| EmailValidator |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/lists.py | {
"start": 1735,
"end": 1811
} | class ____(object):
def __init__(self):
self.pop_uses = None
| _Statement |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar9.py | {
"start": 2664,
"end": 3135
} | class ____(Generic[_T2]):
def __init__(self, x: _T2 = ...) -> None: ...
# This should generate an error because _T appears only once.
def f17(
arg, # type: _T
): # type: (...) -> int
return 1
def f18(
arg, # type: _T
): # type: (...) -> _T
return arg
# This should generate an error because _T appears only once.
def f19(
arg,
): # type: (_T) -> int
return 1
def f20(
arg, # type: _T
): # type: (...) -> _T
return arg
| ClassB |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 56891,
"end": 57981
} | class ____:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6 + 7j
d[1] = -6 + 7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
# Separate definitions so we can use them for matrix tests.
| TestNorm_NonSystematic |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_test_util.py | {
"start": 2577,
"end": 38408
} | class ____(test.TestCase, metaclass=abc.ABCMeta):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y, check_dtype=False):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
if check_dtype:
self.assertDTypeEqual(x, y.dtype)
@staticmethod
def adjoint_options():
return [False, True]
@staticmethod
def adjoint_arg_options():
return [False, True]
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@staticmethod
def use_placeholder_options():
return [False, True]
@staticmethod
def use_blockwise_arg():
return False
@staticmethod
def operator_shapes_infos():
"""Returns list of OperatorShapesInfo, encapsulating the shape to test."""
raise NotImplementedError("operator_shapes_infos has not been implemented.")
@abc.abstractmethod
def operator_and_matrix(
self, shapes_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
together, and is used by tests.
Args:
shapes_info: `OperatorShapesInfo`, encoding shape information about the
operator.
dtype: Numpy dtype. Data type of returned array/operator.
use_placeholder: Python bool. If True, initialize the operator with a
placeholder of undefined shape and correct dtype.
ensure_self_adjoint_and_pd: If `True`,
construct this operator to be Hermitian Positive Definite, as well
as ensuring the hints `is_positive_definite` and `is_self_adjoint`
are set.
This is useful for testing methods such as `cholesky`.
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
raise NotImplementedError("Not implemented yet.")
@abc.abstractmethod
def make_rhs(self, operator, adjoint, with_batch=True):
"""Make a rhs appropriate for calling operator.solve(rhs).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `rhs` with the same batch
shape as operator, and otherwise create a matrix without any batch
shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_rhs is not defined.")
@abc.abstractmethod
def make_x(self, operator, adjoint, with_batch=True):
"""Make an 'x' appropriate for calling operator.matmul(x).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making an 'x' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `x` with the same batch shape
as operator, and otherwise create a matrix without any batch shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_x is not defined.")
@staticmethod
def skip_these_tests():
"""List of test names to skip."""
# Subclasses should over-ride if they want to skip some tests.
# To skip "test_foo", add "foo" to this list.
return []
@staticmethod
def optional_tests():
"""List of optional test names to run."""
# Subclasses should over-ride if they want to add optional tests.
# To add "test_foo", add "foo" to this list.
return []
def assertRaisesError(self, msg):
"""assertRaisesRegexp or OpError, depending on context.executing_eagerly."""
if context.executing_eagerly():
return self.assertRaisesRegex(Exception, msg)
return self.assertRaisesOpError(msg)
def check_convert_variables_to_tensors(self, operator):
"""Checks that internal Variables are correctly converted to Tensors."""
self.assertIsInstance(operator, composite_tensor.CompositeTensor)
tensor_operator = composite_tensor.convert_variables_to_tensors(operator)
self.assertIs(type(operator), type(tensor_operator))
self.assertEmpty(tensor_operator.variables)
self._check_tensors_equal_variables(operator, tensor_operator)
def _check_tensors_equal_variables(self, obj, tensor_obj):
"""Checks that Variables in `obj` have equivalent Tensors in `tensor_obj."""
if isinstance(obj, variables.Variable):
self.assertAllClose(ops.convert_to_tensor(obj),
ops.convert_to_tensor(tensor_obj))
elif isinstance(obj, composite_tensor.CompositeTensor):
params = getattr(obj, "parameters", {})
tensor_params = getattr(tensor_obj, "parameters", {})
self.assertAllEqual(params.keys(), tensor_params.keys())
self._check_tensors_equal_variables(params, tensor_params)
elif nest.is_mapping(obj):
for k, v in obj.items():
self._check_tensors_equal_variables(v, tensor_obj[k])
elif nest.is_nested(obj):
for x, y in zip(obj, tensor_obj):
self._check_tensors_equal_variables(x, y)
else:
# We only check Tensor, CompositeTensor, and nested structure parameters.
pass
def check_tape_safe(self, operator, skip_options=None):
"""Check gradients are not None w.r.t. operator.variables.
Meant to be called from the derived class.
This ensures grads are not w.r.t every variable in operator.variables. If
more fine-grained testing is needed, a custom test should be written.
Args:
operator: LinearOperator. Exact checks done will depend on hints.
skip_options: Optional list of CheckTapeSafeSkipOptions.
Makes this test skip particular checks.
"""
skip_options = skip_options or []
if not operator.variables:
raise AssertionError("`operator.variables` was empty")
def _assert_not_none(iterable):
for item in iterable:
self.assertIsNotNone(item)
# Tape tests that can be run on every operator below.
with backprop.GradientTape() as tape:
grad = tape.gradient(operator.to_dense(), operator.variables)
_assert_not_none(grad)
with backprop.GradientTape() as tape:
var_grad = tape.gradient(operator, operator.variables)
_assert_not_none(var_grad)
nest.assert_same_structure(var_grad, grad)
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.adjoint().to_dense(), operator.variables))
x = math_ops.cast(
array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype)
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.matvec(x), operator.variables))
# Tests for square, but possibly non-singular operators below.
if not operator.is_square:
return
for option in [
CheckTapeSafeSkipOptions.DETERMINANT,
CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT,
CheckTapeSafeSkipOptions.DIAG_PART,
CheckTapeSafeSkipOptions.TRACE,
]:
with backprop.GradientTape() as tape:
if option not in skip_options:
_assert_not_none(
tape.gradient(getattr(operator, option)(), operator.variables))
# Tests for non-singular operators below.
if operator.is_non_singular is False: # pylint: disable=g-bool-id-comparison
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.inverse().to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.solvevec(x), operator.variables))
# Tests for SPD operators below.
if not (operator.is_self_adjoint and operator.is_positive_definite):
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.cholesky().to_dense(), operator.variables))
# pylint:disable=missing-docstring
def _test_slicing(use_placeholder, shapes_info, dtype):
def test_slicing(self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
batch_shape = shapes_info.shape[:-2]
# Don't bother slicing for uninteresting batch shapes.
if not batch_shape or batch_shape[0] <= 1:
return
slices = [slice(1, -1)]
if len(batch_shape) > 1:
# Slice out the last member.
slices += [..., slice(0, 1)]
sliced_operator = operator[slices]
matrix_slices = slices + [slice(None), slice(None)]
sliced_matrix = mat[matrix_slices]
sliced_op_dense = sliced_operator.to_dense()
op_dense_v, mat_v = sess.run([sliced_op_dense, sliced_matrix])
self.assertAC(op_dense_v, mat_v)
return test_slicing
def _test_to_dense(use_placeholder, shapes_info, dtype):
def test_to_dense(self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_dense.shape)
op_dense_v, mat_v = sess.run([op_dense, mat])
self.assertAC(op_dense_v, mat_v)
return test_to_dense
def _test_det(use_placeholder, shapes_info, dtype):
def test_det(self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape[:-2], op_det.shape)
op_det_v, mat_det_v = sess.run(
[op_det, linalg_ops.matrix_determinant(mat)])
self.assertAC(op_det_v, mat_det_v)
return test_det
def _test_log_abs_det(use_placeholder, shapes_info, dtype):
def test_log_abs_det(self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
if not use_placeholder:
self.assertAllEqual(
shapes_info.shape[:-2], op_log_abs_det.shape)
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
[op_log_abs_det, mat_log_abs_det])
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
return test_log_abs_det
def _test_operator_matmul_with_same_type(use_placeholder, shapes_info, dtype):
"""op_a.matmul(op_b), in the case where the same type is returned."""
@test_util.run_without_tensor_float_32("Use FP32 in matmul")
def test_operator_matmul_with_same_type(
self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator_a, mat_a = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
operator_b, mat_b = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
mat_matmul = math_ops.matmul(mat_a, mat_b)
op_matmul = operator_a.matmul(operator_b)
mat_matmul_v, op_matmul_v = sess.run([mat_matmul, op_matmul.to_dense()])
self.assertIsInstance(op_matmul, operator_a.__class__)
self.assertAC(mat_matmul_v, op_matmul_v)
return test_operator_matmul_with_same_type
def _test_operator_solve_with_same_type(use_placeholder, shapes_info, dtype):
"""op_a.solve(op_b), in the case where the same type is returned."""
def test_operator_solve_with_same_type(
self: "LinearOperatorDerivedClassTest"):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator_a, mat_a = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
operator_b, mat_b = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(mat_a, mat_b)
op_solve = operator_a.solve(operator_b)
mat_solve_v, op_solve_v = sess.run([mat_solve, op_solve.to_dense()])
self.assertIsInstance(op_solve, operator_a.__class__)
self.assertAC(mat_solve_v, op_solve_v)
return test_operator_solve_with_same_type
def _test_matmul_base(
    self: "LinearOperatorDerivedClassTest",
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    blockwise_arg,
    with_batch):
  """Checks `operator.matmul(x)` against dense `math_ops.matmul(mat, x)`.

  Args:
    self: The test case this check runs inside.
    use_placeholder: Whether the operator is fed via placeholders (i.e. with
      unknown static shapes).
    shapes_info: Shape descriptor for the operator under test.
    dtype: Dtype of the operator and of `x`.
    adjoint: Whether to multiply by the adjoint of the operator.
    adjoint_arg: Whether `x` is passed in adjointed form (A X^H^H == A X).
    blockwise_arg: Whether to additionally test a blockwise (list-of-blocks)
      `x` when the operator exposes multiple sub-operators.
    with_batch: Whether `x` carries the operator's batch dimensions.
  """
  # If batch dimensions are omitted, but there are
  # no batch dimensions for the linear operator, then
  # skip the test case. This is already checked with
  # with_batch=True.
  if not with_batch and len(shapes_info.shape) <= 2:
    return
  with self.session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    x = self.make_x(
        operator, adjoint=adjoint, with_batch=with_batch)
    # If adjoint_arg, compute A X^H^H = A X.
    if adjoint_arg:
      op_matmul = operator.matmul(
          linalg.adjoint(x),
          adjoint=adjoint,
          adjoint_arg=adjoint_arg)
    else:
      op_matmul = operator.matmul(x, adjoint=adjoint)
    mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
    # Static shapes are only comparable when not using placeholders.
    if not use_placeholder:
      self.assertAllEqual(op_matmul.shape,
                          mat_matmul.shape)
    # If the operator is blockwise, test both blockwise `x` and `Tensor` `x`;
    # else test only `Tensor` `x`. In both cases, evaluate all results in a
    # single `sess.run` call to avoid re-sampling the random `x` in graph mode.
    if blockwise_arg and len(operator.operators) > 1:
      # pylint: disable=protected-access
      block_dimensions = (
          operator._block_range_dimensions() if adjoint else
          operator._block_domain_dimensions())
      block_dimensions_fn = (
          operator._block_range_dimension_tensors if adjoint else
          operator._block_domain_dimension_tensors)
      # pylint: enable=protected-access
      split_x = linear_operator_util.split_arg_into_blocks(
          block_dimensions,
          block_dimensions_fn,
          x, axis=-2)
      if adjoint_arg:
        split_x = [linalg.adjoint(y) for y in split_x]
      split_matmul = operator.matmul(
          split_x, adjoint=adjoint, adjoint_arg=adjoint_arg)
      # Blockwise matmul returns one block per sub-operator.
      self.assertEqual(len(split_matmul), len(operator.operators))
      split_matmul = linear_operator_util.broadcast_matrix_batch_dims(
          split_matmul)
      fused_block_matmul = array_ops.concat(split_matmul, axis=-2)
      op_matmul_v, mat_matmul_v, fused_block_matmul_v = sess.run([
          op_matmul, mat_matmul, fused_block_matmul])
      # Check that the operator applied to blockwise input gives the same result
      # as matrix multiplication.
      self.assertAC(fused_block_matmul_v, mat_matmul_v)
    else:
      op_matmul_v, mat_matmul_v = sess.run([op_matmul, mat_matmul])
    # Check that the operator applied to a `Tensor` gives the same result as
    # matrix multiplication.
    self.assertAC(op_matmul_v, mat_matmul_v)
def _test_matmul(
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    blockwise_arg):
  """Returns a matmul test where `x` carries full batch dimensions."""
  @test_util.run_without_tensor_float_32("Use FP32 in matmul")
  def test_matmul(self: "LinearOperatorDerivedClassTest"):
    _test_matmul_base(
        self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg,
        blockwise_arg, with_batch=True)
  return test_matmul
def _test_matmul_with_broadcast(
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    blockwise_arg):
  """Returns a matmul test where `x` omits the operator's batch dimensions.

  The argument is built without batch dimensions, so the matmul must
  broadcast it against the operator's batch shape.  The fully-batched case
  is covered separately by `_test_matmul`.
  """
  @test_util.run_without_tensor_float_32("Use FP32 in matmul")
  def test_matmul_with_broadcast(self: "LinearOperatorDerivedClassTest"):
    _test_matmul_base(
        self,
        use_placeholder,
        shapes_info,
        dtype,
        adjoint,
        adjoint_arg,
        blockwise_arg,
        # Fix: this was `with_batch=True`, which made the test an exact
        # duplicate of `test_matmul` and left the broadcasting path untested.
        # `with_batch=False` mirrors `_test_solve_with_broadcast`;
        # `_test_matmul_base` skips batchless operators itself.
        with_batch=False)
  return test_matmul_with_broadcast
def _test_adjoint(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.adjoint()` and `.H` to the dense adjoint."""
  def test_adjoint(self: "LinearOperatorDerivedClassTest"):
    with self.test_session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      # Both the method form and the `.H` property must agree with the
      # adjoint of the dense matrix.
      via_method = operator.adjoint().to_dense()
      via_property = operator.H.to_dense()
      expected = linalg.adjoint(dense)
      via_method_v, via_property_v, expected_v = sess.run(
          [via_method, via_property, expected])
      self.assertAC(expected_v, via_method_v)
      self.assertAC(expected_v, via_property_v)
  return test_adjoint
def _test_cholesky(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.cholesky()` to a dense Cholesky."""
  def test_cholesky(self: "LinearOperatorDerivedClassTest"):
    with self.test_session(graph=ops.Graph()) as sess:
      # This test fails to pass for float32 type by a small margin if we use
      # random_seed.DEFAULT_GRAPH_SEED. The correct fix would be relaxing the
      # test tolerance but the tolerance in this test is configured universally
      # depending on its type. So instead of lowering tolerance for all tests
      # or special casing this, just use a seed, +2, that makes this test pass.
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED + 2
      # Cholesky requires a self-adjoint, positive-definite operator.
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder,
          ensure_self_adjoint_and_pd=True)
      factor_op = operator.cholesky().to_dense()
      factor_mat = linalg_ops.cholesky(dense)
      factor_op_v, factor_mat_v = sess.run([factor_op, factor_mat])
      self.assertAC(factor_mat_v, factor_op_v)
  return test_cholesky
def _test_eigvalsh(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.eigvals()` to dense eigenvalues."""
  def test_eigvalsh(self: "LinearOperatorDerivedClassTest"):
    with self.test_session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder,
          ensure_self_adjoint_and_pd=True)
      # Eigenvalues of a self-adjoint operator are real, so cast both sides
      # to float64 and sort to make the comparison order-independent.
      op_eigvals = sort_ops.sort(
          math_ops.cast(operator.eigvals(), dtype=dtypes.float64), axis=-1)
      wide_dtype = dtypes.complex128 if dtype.is_complex else dtypes.float64
      dense = math_ops.cast(dense, dtype=wide_dtype)
      mat_eigvals = sort_ops.sort(
          math_ops.cast(
              linalg_ops.self_adjoint_eigvals(dense), dtype=dtypes.float64),
          axis=-1)
      op_eigvals_v, mat_eigvals_v = sess.run([op_eigvals, mat_eigvals])
      atol = self._atol[dtype]  # pylint: disable=protected-access
      rtol = self._rtol[dtype]  # pylint: disable=protected-access
      if dtype in (dtypes.float32, dtypes.complex64):
        # Single precision needs a looser tolerance here.
        atol = 2e-4
        rtol = 2e-4
      self.assertAllClose(op_eigvals_v, mat_eigvals_v, atol=atol, rtol=rtol)
  return test_eigvalsh
def _test_cond(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.cond()` to an SVD-based condition number."""
  def test_cond(self: "LinearOperatorDerivedClassTest"):
    with self.test_session(graph=ops.Graph()) as sess:
      # svd does not work with zero dimensional matrices, so we'll
      # skip
      if 0 in shapes_info.shape[-2:]:
        return
      # ROCm platform does not yet support complex types
      if test.is_built_with_rocm() and \
          ((dtype == dtypes.complex64) or (dtype == dtypes.complex128)):
        return
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      # Ensure self-adjoint and PD so we get finite condition numbers.
      operator, mat = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder,
          ensure_self_adjoint_and_pd=True)
      # Reference condition number: ratio of the largest to the smallest
      # singular value of the dense matrix.
      op_cond = operator.cond()
      s = math_ops.abs(linalg_ops.svd(mat, compute_uv=False))
      mat_cond = math_ops.reduce_max(s, axis=-1) / math_ops.reduce_min(
          s, axis=-1)
      op_cond_v, mat_cond_v = sess.run([op_cond, mat_cond])
      # Condition numbers are numerically touchy, so use per-dtype tolerances
      # instead of the suite-wide defaults.
      atol_override = {
          dtypes.float16: 1e-2,
          dtypes.float32: 1e-3,
          dtypes.float64: 1e-6,
          dtypes.complex64: 1e-3,
          dtypes.complex128: 1e-6,
      }
      rtol_override = {
          dtypes.float16: 1e-2,
          dtypes.float32: 1e-3,
          dtypes.float64: 1e-4,
          dtypes.complex64: 1e-3,
          dtypes.complex128: 1e-6,
      }
      atol = atol_override[dtype]
      rtol = rtol_override[dtype]
      self.assertAllClose(op_cond_v, mat_cond_v, atol=atol, rtol=rtol)
  return test_cond
def _test_solve_base(
    self: "LinearOperatorDerivedClassTest",
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    blockwise_arg,
    with_batch):
  """Checks `operator.solve(rhs)` against a dense broadcasting solve.

  Args:
    self: The test case this check runs inside.
    use_placeholder: Whether the operator is fed via placeholders (i.e. with
      unknown static shapes).
    shapes_info: Shape descriptor for the operator under test.
    dtype: Dtype of the operator and of `rhs`.
    adjoint: Whether to solve with the adjoint of the operator.
    adjoint_arg: Whether `rhs` is passed in adjointed form
      (A X = (rhs^H)^H = rhs).
    blockwise_arg: Whether to additionally test a blockwise (list-of-blocks)
      `rhs` when the operator exposes multiple sub-operators.
    with_batch: Whether `rhs` carries the operator's batch dimensions.
  """
  # If batch dimensions are omitted, but there are
  # no batch dimensions for the linear operator, then
  # skip the test case. This is already checked with
  # with_batch=True.
  if not with_batch and len(shapes_info.shape) <= 2:
    return
  with self.session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    rhs = self.make_rhs(
        operator, adjoint=adjoint, with_batch=with_batch)
    # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
    if adjoint_arg:
      op_solve = operator.solve(
          linalg.adjoint(rhs),
          adjoint=adjoint,
          adjoint_arg=adjoint_arg)
    else:
      op_solve = operator.solve(
          rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
    mat_solve = linear_operator_util.matrix_solve_with_broadcast(
        mat, rhs, adjoint=adjoint)
    # Static shapes are only comparable when not using placeholders.
    if not use_placeholder:
      self.assertAllEqual(op_solve.shape,
                          mat_solve.shape)
    # If the operator is blockwise, test both blockwise rhs and `Tensor` rhs;
    # else test only `Tensor` rhs. In both cases, evaluate all results in a
    # single `sess.run` call to avoid re-sampling the random rhs in graph mode.
    if blockwise_arg and len(operator.operators) > 1:
      # pylint: disable=protected-access
      block_dimensions = (
          operator._block_range_dimensions() if adjoint else
          operator._block_domain_dimensions())
      block_dimensions_fn = (
          operator._block_range_dimension_tensors if adjoint else
          operator._block_domain_dimension_tensors)
      # pylint: enable=protected-access
      split_rhs = linear_operator_util.split_arg_into_blocks(
          block_dimensions,
          block_dimensions_fn,
          rhs, axis=-2)
      if adjoint_arg:
        split_rhs = [linalg.adjoint(y) for y in split_rhs]
      split_solve = operator.solve(
          split_rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
      # Blockwise solve returns one block per sub-operator.
      self.assertEqual(len(split_solve), len(operator.operators))
      split_solve = linear_operator_util.broadcast_matrix_batch_dims(
          split_solve)
      fused_block_solve = array_ops.concat(split_solve, axis=-2)
      op_solve_v, mat_solve_v, fused_block_solve_v = sess.run([
          op_solve, mat_solve, fused_block_solve])
      # Check that the operator and matrix give the same solution when the rhs
      # is blockwise.
      self.assertAC(mat_solve_v, fused_block_solve_v)
    else:
      op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
    # Check that the operator and matrix give the same solution when the rhs is
    # a `Tensor`.
    self.assertAC(op_solve_v, mat_solve_v)
def _test_solve(
    use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg):
  """Returns a solve test where the rhs carries full batch dimensions."""
  def test_solve(self: "LinearOperatorDerivedClassTest"):
    _test_solve_base(
        self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg,
        blockwise_arg, with_batch=True)
  return test_solve
def _test_solve_with_broadcast(
    use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, blockwise_arg):
  """Returns a solve test where the rhs omits the operator's batch dims."""
  def test_solve_with_broadcast(self: "LinearOperatorDerivedClassTest"):
    _test_solve_base(
        self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg,
        blockwise_arg, with_batch=False)
  return test_solve_with_broadcast
def _test_inverse(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.inverse()` to a dense inverse."""
  def test_inverse(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      inverse_op_v, inverse_mat_v = sess.run(
          [operator.inverse().to_dense(), linalg.inv(dense)])
      # Also confirm the dtypes agree, not just the values.
      self.assertAC(inverse_op_v, inverse_mat_v, check_dtype=True)
  return test_inverse
def _test_trace(use_placeholder, shapes_info, dtype):
  """Returns a test comparing `operator.trace()` to the dense trace."""
  def test_trace(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      trace_op = operator.trace()
      trace_mat = math_ops.trace(dense)
      if not use_placeholder:
        # Static shapes are comparable only without placeholders.
        self.assertAllEqual(trace_op.shape, trace_mat.shape)
      trace_op_v, trace_mat_v = sess.run([trace_op, trace_mat])
      self.assertAC(trace_op_v, trace_mat_v)
  return test_trace
def _test_add_to_tensor(use_placeholder, shapes_info, dtype):
  """Returns a test of `operator.add_to_tensor` against dense arithmetic."""
  def test_add_to_tensor(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      # Adding the operator to twice its dense form should yield 3 * dense.
      total = operator.add_to_tensor(2 * dense)
      if not use_placeholder:
        self.assertAllEqual(shapes_info.shape, total.shape)
      total_v, dense_v = sess.run([total, dense])
      self.assertAC(total_v, 3 * dense_v)
  return test_add_to_tensor
def _test_diag_part(use_placeholder, shapes_info, dtype):
  """Returns a test of `operator.diag_part` against `matrix_diag_part`."""
  def test_diag_part(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, dense = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      diag_op = operator.diag_part()
      diag_mat = array_ops.matrix_diag_part(dense)
      if not use_placeholder:
        # Static shapes are comparable only without placeholders.
        self.assertAllEqual(diag_mat.shape, diag_op.shape)
      diag_op_v, diag_mat_v = sess.run([diag_op, diag_mat])
      self.assertAC(diag_op_v, diag_mat_v)
  return test_diag_part
def _test_composite_tensor(use_placeholder, shapes_info, dtype):
  """Returns a test that the operator round-trips as a `CompositeTensor`.

  Verifies flatten/pack round-trips, `tf.function` input, `while_loop`
  loop-var usage, and `TypeSpec` encoding, comparing matmul results to the
  dense matrix throughout.
  """
  @test_util.run_without_tensor_float_32("Use FP32 in matmul")
  def test_composite_tensor(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, mat = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      self.assertIsInstance(operator, composite_tensor.CompositeTensor)
      # Flatten/pack must reconstruct an operator of the same type.
      flat = nest.flatten(operator, expand_composites=True)
      unflat = nest.pack_sequence_as(operator, flat, expand_composites=True)
      self.assertIsInstance(unflat, type(operator))
      # Input the operator to a `tf.function`.
      x = self.make_x(operator, adjoint=False)
      op_y = def_function.function(lambda op: op.matmul(x))(unflat)
      mat_y = math_ops.matmul(mat, x)
      if not use_placeholder:
        self.assertAllEqual(mat_y.shape, op_y.shape)
      # Test while_loop.
      def body(op):
        # Rebuild the operator from its parameters on each iteration.
        return type(op)(**op.parameters),
      op_out, = while_v2.while_loop(
          cond=lambda _: True,
          body=body,
          loop_vars=(operator,),
          maximum_iterations=3)
      loop_y = op_out.matmul(x)
      op_y_, loop_y_, mat_y_ = sess.run([op_y, loop_y, mat_y])
      self.assertAC(op_y_, mat_y_)
      self.assertAC(loop_y_, mat_y_)
      # Ensure that the `TypeSpec` can be encoded.
      nested_structure_coder.encode_structure(operator._type_spec)  # pylint: disable=protected-access
  return test_composite_tensor
def _test_saved_model(use_placeholder, shapes_info, dtype):
  """Returns a test that the operator survives a SavedModel round-trip.

  Saves a `tf.Module` whose `tf.function` takes the operator's `TypeSpec` as
  input signature, reloads it, and checks the restored matmul against a dense
  matmul.
  """
  @test_util.run_without_tensor_float_32("Use FP32 in matmul")
  def test_saved_model(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      if test_util.is_xla_enabled() and np.prod(shapes_info.shape) == 0:
        self.skipTest("Saving XLA model fails for empty model.")
      operator, mat = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      x = self.make_x(operator, adjoint=False)
      class Model(module.Module):
        def __init__(self, init_x):
          # Variables with unknown shape so the loaded model can re-assign.
          self.x = nest.map_structure(
              lambda x_: variables.Variable(x_, shape=None),
              init_x)
        @def_function.function(input_signature=(operator._type_spec,))  # pylint: disable=protected-access
        def do_matmul(self, op):
          return op.matmul(self.x)
      saved_model_dir = self.get_temp_dir()
      m1 = Model(x)
      sess.run([v.initializer for v in m1.variables])
      # Mutate the variable before saving so the save captures real state.
      sess.run(m1.x.assign(m1.x + 1.))
      save_model.save(m1, saved_model_dir)
      m2 = load_model.load(saved_model_dir)
      sess.run(m2.x.initializer)
      # Mutate again after loading to confirm the restored function tracks
      # the restored variable, not a frozen constant.
      sess.run(m2.x.assign(m2.x + 1.))
      y_op = m2.do_matmul(operator)
      y_mat = math_ops.matmul(mat, m2.x)
      y_op_, y_mat_ = sess.run([y_op, y_mat])
      self.assertAC(y_op_, y_mat_)
  return test_saved_model
def _test_composite_tensor_gradient(use_placeholder, shapes_info, dtype):
  """Returns a test that gradients w.r.t. the operator match flattened grads.

  Compares the gradient taken with the operator as a single composite
  argument against gradients taken w.r.t. its flattened component tensors.
  """
  def test_composite_tensor_gradient(self: "LinearOperatorDerivedClassTest"):
    with self.session(graph=ops.Graph()) as sess:
      sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
      operator, _ = self.operator_and_matrix(
          shapes_info, dtype, use_placeholder=use_placeholder)
      x = self.make_x(operator, adjoint=False)
      y = operator.matmul(x)
      # Gradient of y w.r.t. the operator as a composite whole.
      op_g, = gradients_impl.gradients(
          y,
          operator,
          grad_ys=array_ops.ones_like(y))  # Complex dtypes need grad_ys.
      def _unflatten_and_matmul(components):
        unflat_op = nest.pack_sequence_as(
            operator, components, expand_composites=True)
        return unflat_op.matmul(x)
      flat_op = nest.flatten(operator, expand_composites=True)
      y_ = _unflatten_and_matmul(flat_op)
      # Gradient of the same computation w.r.t. each component tensor.
      flat_g = gradients_impl.gradients(
          y_,
          flat_op,
          grad_ys=array_ops.ones_like(y_))
      if all(g is None for g in flat_g):
        # No component has a gradient; the composite gradient must be None.
        self.assertIsNone(op_g)
      else:
        self.assertIsInstance(op_g, operator.__class__)
        for g, ug in zip(nest.flatten(op_g, expand_composites=True),
                         nest.flatten(flat_g, expand_composites=True)):
          self.assertAllClose(g, ug)
  return test_composite_tensor_gradient
# pylint:enable=missing-docstring
def add_tests(test_cls):
  """Add tests for LinearOperator methods.

  For each enabled test name, generates one test method per combination of
  dtype, placeholder option, and operator shape (plus adjoint options where
  applicable) and attaches it to `test_cls`.
  """
  # Maps public test name -> module-level factory that builds the bound
  # test method for one combination.
  test_name_dict = {
      # All test classes should be added here.
      "add_to_tensor": _test_add_to_tensor,
      "adjoint": _test_adjoint,
      "cholesky": _test_cholesky,
      "cond": _test_cond,
      "composite_tensor": _test_composite_tensor,
      "composite_tensor_gradient": _test_composite_tensor_gradient,
      "det": _test_det,
      "diag_part": _test_diag_part,
      "eigvalsh": _test_eigvalsh,
      "inverse": _test_inverse,
      "log_abs_det": _test_log_abs_det,
      "operator_matmul_with_same_type": _test_operator_matmul_with_same_type,
      "operator_solve_with_same_type": _test_operator_solve_with_same_type,
      "matmul": _test_matmul,
      "matmul_with_broadcast": _test_matmul_with_broadcast,
      "saved_model": _test_saved_model,
      "slicing": _test_slicing,
      "solve": _test_solve,
      "solve_with_broadcast": _test_solve_with_broadcast,
      "to_dense": _test_to_dense,
      "trace": _test_trace,
  }
  optional_tests = [
      # Test classes need to explicitly add these to cls.optional_tests.
      "operator_matmul_with_same_type",
      "operator_solve_with_same_type",
  ]
  # These factories additionally take (adjoint, adjoint_arg, blockwise_arg).
  tests_with_adjoint_args = [
      "matmul",
      "matmul_with_broadcast",
      "solve",
      "solve_with_broadcast",
  ]
  # A test cannot be both skipped and optional: that combination is a
  # configuration error in the test class.
  if set(test_cls.skip_these_tests()).intersection(test_cls.optional_tests()):
    raise ValueError(
        # Fix: the first fragment was missing its `f` prefix, so the literal
        # text "{test_cls}" was emitted instead of the class.
        f"Test class {test_cls} had intersecting 'skip_these_tests' "
        f"{test_cls.skip_these_tests()} and 'optional_tests' "
        f"{test_cls.optional_tests()}.")
  for name, test_template_fn in test_name_dict.items():
    if name in test_cls.skip_these_tests():
      continue
    if name in optional_tests and name not in test_cls.optional_tests():
      continue
    # One generated test method per (dtype, placeholder, shape) combination.
    for dtype, use_placeholder, shape_info in itertools.product(
        test_cls.dtypes_to_test(),
        test_cls.use_placeholder_options(),
        test_cls.operator_shapes_infos()):
      base_test_name = "_".join([
          "test", name, "_shape={},dtype={},use_placeholder={}".format(
              shape_info.shape, dtype, use_placeholder)])
      if name in tests_with_adjoint_args:
        for adjoint in test_cls.adjoint_options():
          for adjoint_arg in test_cls.adjoint_arg_options():
            test_name = base_test_name + ",adjoint={},adjoint_arg={}".format(
                adjoint, adjoint_arg)
            if hasattr(test_cls, test_name):
              raise RuntimeError("Test %s defined more than once" % test_name)
            setattr(
                test_cls,
                test_name,
                test_util.run_deprecated_v1(
                    test_template_fn(  # pylint: disable=too-many-function-args
                        use_placeholder, shape_info, dtype, adjoint,
                        adjoint_arg, test_cls.use_blockwise_arg())))
      else:
        if hasattr(test_cls, base_test_name):
          raise RuntimeError("Test %s defined more than once" % base_test_name)
        setattr(
            test_cls,
            base_test_name,
            test_util.run_deprecated_v1(test_template_fn(
                use_placeholder, shape_info, dtype)))
| LinearOperatorDerivedClassTest |
python | getsentry__sentry | src/sentry/web/frontend/group_tag_export.py | {
"start": 1218,
"end": 2571
} | class ____(ProjectView):
required_scope = "event:read"
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=10, window=1, concurrent_limit=10),
RateLimitCategory.USER: RateLimit(limit=10, window=1, concurrent_limit=10),
RateLimitCategory.ORGANIZATION: RateLimit(limit=20, window=1, concurrent_limit=5),
}
}
)
def get(self, request: Request, organization, project, group_id, key) -> HttpResponseBase:
# If the environment doesn't exist then the tag can't possibly exist
try:
environment_id = get_environment_id(request, project.organization_id)
except Environment.DoesNotExist:
raise Http404
try:
processor = IssuesByTagProcessor(
project_id=project.id,
group_id=group_id,
key=key,
environment_id=environment_id,
tenant_ids={"organization_id": project.organization_id},
)
except ExportError:
raise Http404
filename = f"{processor.group.qualified_short_id or processor.group.id}-{key}"
return GroupTagCsvResponder(key).respond(processor.get_raw_data(), filename)
| GroupTagExportView |
python | tensorflow__tensorflow | tensorflow/python/distribute/strategy_combinations_test.py | {
"start": 3229,
"end": 5414
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
tf2.disable()
@combinations.generate(
combinations.combine(strategy=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.one_device_strategy_gpu_on_worker_1,
strategy_combinations.one_device_strategy_on_worker_1
]))
def testOneDevice(self, strategy):
self.assertIsInstance(strategy, one_device_strategy.OneDeviceStrategyV1)
@combinations.generate(
combinations.combine(strategy=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_one_gpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
]))
def testMirrored(self, strategy):
self.assertIsInstance(strategy, mirrored_strategy.MirroredStrategyV1)
@combinations.generate(
combinations.combine(strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_4x1_cpu,
]))
def testMultiWorkerMirrored(self, strategy):
# MultiWorkerMirroredStrategy combinations only supports V2.
self.assertIsInstance(
strategy, collective_all_reduce_strategy.CollectiveAllReduceStrategy)
@combinations.generate(
combinations.combine(strategy=[
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_two_gpus,
]))
def testCentralStorage(self, strategy):
self.assertIsInstance(strategy,
central_storage_strategy.CentralStorageStrategyV1)
@combinations.generate(
combinations.combine(strategy=strategy_combinations.tpu_strategies))
def testTPU(self, strategy):
self.assertIsInstance(strategy, tpu_strategy.TPUStrategyV1)
| V1StrategyTest |
python | gevent__gevent | src/gevent/tests/test__semaphore.py | {
"start": 10973,
"end": 11152
} | class ____(TestSemaphoreMultiThread):
def _getTargetClass(self):
return BoundedSemaphore
@greentest.skipOnPurePython("Needs C extension")
| TestBoundedSemaphoreMultiThread |
python | mlflow__mlflow | mlflow/tracing/otel/translation/google_adk.py | {
"start": 72,
"end": 667
} | class ____(OtelSchemaTranslator):
"""
Translator for Google ADK semantic conventions.
Google ADK mostly uses OpenTelemetry semantic conventions, but with some custom
inputs and outputs attributes.
"""
# Input/Output attribute keys
# Reference: https://github.com/google/adk-python/blob/d2888a3766b87df2baaaa1a67a2235b1b80f138f/src/google/adk/telemetry/tracing.py#L264
INPUT_VALUE_KEYS = ["gcp.vertex.agent.llm_request", "gcp.vertex.agent.tool_call_args"]
OUTPUT_VALUE_KEYS = ["gcp.vertex.agent.llm_response", "gcp.vertex.agent.tool_response"]
| GoogleADKTranslator |
python | numba__numba | numba/cuda/target.py | {
"start": 2579,
"end": 14463
} | class ____(BaseContext):
implement_powi_as_math_call = True
strict_alignment = True
def __init__(self, typingctx, target='cuda'):
super().__init__(typingctx, target)
self.data_model_manager = cuda_data_manager.chain(
datamodel.default_manager
)
@property
def DIBuilder(self):
return debuginfo.DIBuilder
@property
def enable_boundscheck(self):
# Unconditionally disabled
return False
# Overrides
def create_module(self, name):
return self._internal_codegen._create_empty_module(name)
def init(self):
self._internal_codegen = codegen.JITCUDACodegen("numba.cuda.jit")
self._target_data = None
def load_additional_registries(self):
# side effect of import needed for numba.cpython.*, the builtins
# registry is updated at import time.
from numba.cpython import numbers, tupleobj, slicing # noqa: F401
from numba.cpython import rangeobj, iterators, enumimpl # noqa: F401
from numba.cpython import unicode, charseq # noqa: F401
from numba.cpython import cmathimpl
from numba.misc import cffiimpl
from numba.np import arrayobj # noqa: F401
from numba.np import npdatetime # noqa: F401
from . import (
cudaimpl, printimpl, libdeviceimpl, mathimpl, vector_types
)
# fix for #8940
from numba.np.unsafe import ndarray # noqa F401
self.install_registry(cudaimpl.registry)
self.install_registry(cffiimpl.registry)
self.install_registry(printimpl.registry)
self.install_registry(libdeviceimpl.registry)
self.install_registry(cmathimpl.registry)
self.install_registry(mathimpl.registry)
self.install_registry(vector_types.impl_registry)
def codegen(self):
return self._internal_codegen
@property
def target_data(self):
if self._target_data is None:
self._target_data = ll.create_target_data(nvvm.NVVM().data_layout)
return self._target_data
@cached_property
def nonconst_module_attrs(self):
"""
Some CUDA intrinsics are at the module level, but cannot be treated as
constants, because they are loaded from a special register in the PTX.
These include threadIdx, blockDim, etc.
"""
from numba import cuda
nonconsts = ('threadIdx', 'blockDim', 'blockIdx', 'gridDim', 'laneid',
'warpsize')
nonconsts_with_mod = tuple([(types.Module(cuda), nc)
for nc in nonconsts])
return nonconsts_with_mod
@cached_property
def call_conv(self):
return CUDACallConv(self)
def mangler(self, name, argtypes, *, abi_tags=(), uid=None):
return itanium_mangler.mangle(name, argtypes, abi_tags=abi_tags,
uid=uid)
def prepare_cuda_kernel(self, codelib, fndesc, debug, lineinfo,
nvvm_options, filename, linenum,
max_registers=None):
"""
Adapt a code library ``codelib`` with the numba compiled CUDA kernel
with name ``fname`` and arguments ``argtypes`` for NVVM.
A new library is created with a wrapper function that can be used as
the kernel entry point for the given kernel.
Returns the new code library and the wrapper function.
Parameters:
codelib: The CodeLibrary containing the device function to wrap
in a kernel call.
fndesc: The FunctionDescriptor of the source function.
debug: Whether to compile with debug.
lineinfo: Whether to emit line info.
nvvm_options: Dict of NVVM options used when compiling the new library.
filename: The source filename that the function is contained in.
linenum: The source line that the function is on.
max_registers: The max_registers argument for the code library.
"""
kernel_name = itanium_mangler.prepend_namespace(
fndesc.llvm_func_name, ns='cudapy',
)
library = self.codegen().create_library(f'{codelib.name}_kernel_',
entry_name=kernel_name,
nvvm_options=nvvm_options,
max_registers=max_registers)
library.add_linking_library(codelib)
wrapper = self.generate_kernel_wrapper(library, fndesc, kernel_name,
debug, lineinfo, filename,
linenum)
return library, wrapper
def generate_kernel_wrapper(self, library, fndesc, kernel_name, debug,
lineinfo, filename, linenum):
"""
Generate the kernel wrapper in the given ``library``.
The function being wrapped is described by ``fndesc``.
The wrapper function is returned.
"""
argtypes = fndesc.argtypes
arginfo = self.get_arg_packer(argtypes)
argtys = list(arginfo.argument_types)
wrapfnty = ir.FunctionType(ir.VoidType(), argtys)
wrapper_module = self.create_module("cuda.kernel.wrapper")
fnty = ir.FunctionType(ir.IntType(32),
[self.call_conv.get_return_type(types.pyobject)]
+ argtys)
func = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)
prefixed = itanium_mangler.prepend_namespace(func.name, ns='cudapy')
wrapfn = ir.Function(wrapper_module, wrapfnty, prefixed)
builder = ir.IRBuilder(wrapfn.append_basic_block(''))
if debug or lineinfo:
directives_only = lineinfo and not debug
debuginfo = self.DIBuilder(module=wrapper_module,
filepath=filename,
cgctx=self,
directives_only=directives_only)
debuginfo.mark_subprogram(
wrapfn, kernel_name, fndesc.args, argtypes, linenum,
)
debuginfo.mark_location(builder, linenum)
# Define error handling variable
def define_error_gv(postfix):
name = wrapfn.name + postfix
gv = cgutils.add_global_variable(wrapper_module, ir.IntType(32),
name)
gv.initializer = ir.Constant(gv.type.pointee, None)
return gv
gv_exc = define_error_gv("__errcode__")
gv_tid = []
gv_ctaid = []
for i in 'xyz':
gv_tid.append(define_error_gv("__tid%s__" % i))
gv_ctaid.append(define_error_gv("__ctaid%s__" % i))
callargs = arginfo.from_arguments(builder, wrapfn.args)
status, _ = self.call_conv.call_function(
builder, func, types.void, argtypes, callargs)
if debug:
# Check error status
with cgutils.if_likely(builder, status.is_ok):
builder.ret_void()
with builder.if_then(builder.not_(status.is_python_exc)):
# User exception raised
old = ir.Constant(gv_exc.type.pointee, None)
# Use atomic cmpxchg to prevent rewriting the error status
# Only the first error is recorded
xchg = builder.cmpxchg(gv_exc, old, status.code,
'monotonic', 'monotonic')
changed = builder.extract_value(xchg, 1)
# If the xchange is successful, save the thread ID.
sreg = nvvmutils.SRegBuilder(builder)
with builder.if_then(changed):
for dim, ptr, in zip("xyz", gv_tid):
val = sreg.tid(dim)
builder.store(val, ptr)
for dim, ptr, in zip("xyz", gv_ctaid):
val = sreg.ctaid(dim)
builder.store(val, ptr)
builder.ret_void()
nvvm.set_cuda_kernel(wrapfn)
library.add_ir_module(wrapper_module)
if debug or lineinfo:
debuginfo.finalize()
library.finalize()
if config.DUMP_LLVM:
utils.dump_llvm(fndesc, wrapper_module)
return library.get_function(wrapfn.name)
def make_constant_array(self, builder, aryty, arr):
"""
Unlike the parent version. This returns a a pointer in the constant
addrspace.
"""
lmod = builder.module
constvals = [
self.get_constant(types.byte, i)
for i in iter(arr.tobytes(order='A'))
]
constaryty = ir.ArrayType(ir.IntType(8), len(constvals))
constary = ir.Constant(constaryty, constvals)
addrspace = nvvm.ADDRSPACE_CONSTANT
gv = cgutils.add_global_variable(lmod, constary.type, "_cudapy_cmem",
addrspace=addrspace)
gv.linkage = 'internal'
gv.global_constant = True
gv.initializer = constary
# Preserve the underlying alignment
lldtype = self.get_data_type(aryty.dtype)
align = self.get_abi_sizeof(lldtype)
gv.align = 2 ** (align - 1).bit_length()
# Convert to generic address-space
ptrty = ir.PointerType(ir.IntType(8))
genptr = builder.addrspacecast(gv, ptrty, 'generic')
# Create array object
ary = self.make_array(aryty)(self, builder)
kshape = [self.get_constant(types.intp, s) for s in arr.shape]
kstrides = [self.get_constant(types.intp, s) for s in arr.strides]
self.populate_array(ary, data=builder.bitcast(genptr, ary.data.type),
shape=kshape,
strides=kstrides,
itemsize=ary.itemsize, parent=ary.parent,
meminfo=None)
return ary._getvalue()
def insert_const_string(self, mod, string):
"""
Unlike the parent version. This returns a a pointer in the constant
addrspace.
"""
text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00")
name = '$'.join(["__conststring__",
itanium_mangler.mangle_identifier(string)])
# Try to reuse existing global
gv = mod.globals.get(name)
if gv is None:
# Not defined yet
gv = cgutils.add_global_variable(mod, text.type, name,
addrspace=nvvm.ADDRSPACE_CONSTANT)
gv.linkage = 'internal'
gv.global_constant = True
gv.initializer = text
# Cast to a i8* pointer
charty = gv.type.pointee.element
return gv.bitcast(charty.as_pointer(nvvm.ADDRSPACE_CONSTANT))
def insert_string_const_addrspace(self, builder, string):
"""
Insert a constant string in the constant addresspace and return a
generic i8 pointer to the data.
This function attempts to deduplicate.
"""
lmod = builder.module
gv = self.insert_const_string(lmod, string)
charptrty = ir.PointerType(ir.IntType(8))
return builder.addrspacecast(gv, charptrty, 'generic')
def optimize_function(self, func):
"""Run O1 function passes
"""
pass
## XXX skipped for now
# fpm = lp.FunctionPassManager.new(func.module)
#
# lp.PassManagerBuilder.new().populate(fpm)
#
# fpm.initialize()
# fpm.run(func)
# fpm.finalize()
def get_ufunc_info(self, ufunc_key):
    """Return the ufunc implementation info registered for *ufunc_key*.

    Thin delegation to the CUDA ufunc registry.
    """
    return ufuncs.get_ufunc_info(ufunc_key)
| CUDATargetContext |
python | apache__airflow | airflow-ctl/src/airflowctl/ctl/console_formatting.py | {
"start": 1613,
"end": 4794
} | class ____(Console):
"""Airflow rich console."""
def __init__(self, show_header: bool = True, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # When output is piped (not a TTY), pin the width to a constant so the
    # whole content is emitted instead of being wrapped to a detected size.
    if not is_tty():
        self._width = 200
    # Whether tables should render a header row.
    self.show_header = show_header
def print_as_json(self, data: dict):
    """Render dict as json text representation."""
    serialized = json.dumps(data)
    self.print(Syntax(serialized, "json", theme="ansi_dark"), soft_wrap=True)
def print_as_yaml(self, data: dict):
    """Render dict as yaml text representation."""
    serialized = yaml.dump(data)
    self.print(Syntax(serialized, "yaml", theme="ansi_dark"), soft_wrap=True)
def print_as_table(self, data: list[dict]):
    """Render list of dictionaries as table."""
    if not data:
        self.print("No data found")
        return
    # Columns come from the first record; every row is stringified.
    table = SimpleTable(show_header=self.show_header)
    for column_name in data[0]:
        table.add_column(column_name)
    for record in data:
        table.add_row(*map(str, record.values()))
    self.print(table)
def print_as_plain_table(self, data: list[dict]):
    """Render list of dictionaries as a simple table that can be easily piped."""
    if not data:
        self.print("No data found")
        return
    headers = list(data[0])
    body = [record.values() for record in data]
    self.print(tabulate(body, tablefmt="plain", headers=headers))
def _normalize_data(self, value: Any, output: str) -> list | str | dict | None:
    """Recursively coerce *value* into plain builtins suitable for *output*.

    Sequences and dicts are walked recursively; leaves become strings,
    except ``None`` which is passed through unchanged.
    """
    if isinstance(value, (tuple, list)):
        # Table cells have no nested structure: flatten sequences to CSV.
        if output == "table":
            return ",".join(str(self._normalize_data(item, output)) for item in value)
        return [self._normalize_data(item, output) for item in value]
    if isinstance(value, dict) and output != "table":
        return {key: self._normalize_data(val, output) for key, val in value.items()}
    return None if value is None else str(value)
def print_as(
    self,
    data: Sequence[dict | Any],
    output: str,
    mapper: Callable[[Any], dict] | None = None,
) -> None:
    """Print provided using format specified by output argument."""
    renderers: dict[str, Callable[[Any], None]] = {
        "json": self.print_as_json,
        "yaml": self.print_as_yaml,
        "table": self.print_as_table,
        "plain": self.print_as_plain_table,
    }
    renderer = renderers.get(output)
    if renderer is None:
        raise ValueError(f"Unknown formatter: {output}. Allowed options: {list(renderers)}")
    # Non-dict rows must be converted via the caller-supplied mapper.
    if mapper:
        rows: Sequence[dict] = [mapper(item) for item in data]
    elif is_data_sequence(data):
        rows = data
    else:
        raise ValueError("To tabulate non-dictionary data you need to provide `mapper` function")
    normalized = [
        {key: self._normalize_data(val, output) for key, val in row.items()}
        for row in rows
    ]
    renderer(normalized)
| AirflowConsole |
python | ray-project__ray | release/ray_release/reporter/ray_test_db.py | {
"start": 350,
"end": 2157
} | class ____(Reporter):
"""
Reporter that updates the test and test result object in s3 with the latest test run
information.
- test: Test object that contains the test information (name, oncall, state, etc.)
- result: Result object that contains the test result information of this particular
test run (status, start time, end time, etc.)
"""
def report_result(self, test: Test, result: Result) -> None:
    """Persist *result* for *test* to S3 and advance the release-test state machine.

    Uploads only happen on master-branch runs of the configured post-merge
    CI pipelines; transient infra-error results are deliberately dropped.
    """
    branch = os.environ.get("BUILDKITE_BRANCH")
    pipeline_id = os.environ.get("BUILDKITE_PIPELINE_ID")
    if branch != "master":
        logger.info("Skip upload test results. We only upload on master branch.")
        return
    if pipeline_id not in get_global_config()["ci_pipeline_postmerge"]:
        logger.info("Skip upload test results. We only upload on branch pipeline.")
        return
    if result.status == ResultStatus.TRANSIENT_INFRA_ERROR.value:
        logger.info(
            f"Skip recording result for test {test.get_name()} due to transient "
            "infra error result"
        )
        return
    logger.info(
        f"Updating test object {test.get_name()} with result {result.status}"
    )
    test.persist_result_to_s3(result)
    # Refresh the local test object with the latest persisted state before
    # mutating it further.
    test.update_from_s3()
    logger.info(f"Test object: {json.dumps(test)}")
    logger.info(
        f"Test results: "
        f"{json.dumps([r.__dict__ for r in test.get_test_results()])}"
    )
    # Compute the next lifecycle state, then write the updated object back.
    ReleaseTestStateMachine(test).move()
    test.persist_to_s3()
    logger.info(f"Test object {test.get_name()} updated successfully")
| RayTestDBReporter |
python | django__django | tests/admin_views/models.py | {
"start": 9556,
"end": 9712
} | class ____(models.Model):
def __str__(self):
    """Identify the instance by its primary key."""
    pk = self.id
    return "Primary key = %s" % pk
# NOTE(review): module-level side effect — creates a fresh temporary directory
# at import time; presumably shared by FileField-style test models. TODO confirm.
temp_storage = FileSystemStorage(tempfile.mkdtemp())
| EmptyModel |
python | huggingface__transformers | src/transformers/models/fnet/modeling_fnet.py | {
"start": 14055,
"end": 14537
} | class ____(nn.Module):
def __init__(self, config):
    super().__init__()
    # Masked-LM head producing per-token prediction scores.
    self.predictions = FNetLMPredictionHead(config)
    # Two-way classifier for the sentence-relationship objective.
    self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
    """Return (MLM prediction scores, sentence-relationship logits)."""
    mlm_scores = self.predictions(sequence_output)
    relationship_logits = self.seq_relationship(pooled_output)
    return mlm_scores, relationship_logits
@auto_docstring
| FNetPreTrainingHeads |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 10669,
"end": 11030
} | class ____(PrimitiveModel):
def __init__(self, dmm, fe_type):
    """Model a typed pointer: the backend type is a pointer to the
    pointee's data representation."""
    pointee = dmm.lookup(fe_type.dtype)
    self._pointee_model = pointee
    self._pointee_be_type = pointee.get_data_type()
    super(PointerModel, self).__init__(
        dmm, fe_type, self._pointee_be_type.as_pointer())
@register_default(types.EphemeralPointer)
| PointerModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.