language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI059.py | {
"start": 1647,
"end": 1722
} | class ____(Generic[T], metaclass=type, *[str]): # PYI059 but no fix
...
| C3 |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 9568,
"end": 10071
} | class ____(HistoricalRecords):
def get_extra_fields(self, model, fields):
def verbose_str(self):
return "{} changed by {} as of {}".format(
self.history_object,
self.history_user,
self.history_date,
)
extra_fields = super().get_extra_fields(model, fields)
extra_fields["__str__"] = verbose_str
return extra_fields
register(Voter, records_class=HistoricalRecordsVerbose)
| HistoricalRecordsVerbose |
python | pydata__xarray | xarray/backends/netCDF4_.py | {
"start": 1826,
"end": 2929
} | class ____(BackendArray):
__slots__ = ("datastore", "dtype", "shape", "variable_name")
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype (with additional vlen string metadata) because that's
# the only way in numpy to represent variable length strings and to
# check vlen string dtype in further steps
# it also prevents automatic string concatenation via
# conventions.decode_cf_variable
dtype = create_vlen_dtype(str)
self.dtype = dtype
def __setitem__(self, key, value):
with self.datastore.lock:
data = self.get_array(needs_lock=False)
data[key] = value
if self.datastore.autoclose:
self.datastore.close(needs_lock=False)
def get_array(self, needs_lock=True):
raise NotImplementedError("Virtual Method")
| BaseNetCDF4Array |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 428496,
"end": 444715
} | class ____(Request):
"""
Get Elasticsearch query that returns frames with unique URIs for the given dataview specification.
The query is returned only for the clients that have kibana space set up.
Note: 'count_range' option for label rules is not supported and not reflected in the query
:param dataview: Dataview specification
:type dataview: Dataview
"""
_service = "frames"
_action = "get_snippets_query_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"dataview": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/dv_augmentation"},
{"type": "null"},
],
},
"filters": {
"description": "List of FilterRule ('OR' relationship)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": ["object", "null"],
},
"mapping": {
"description": "Mapping parameters",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
"versions": {
"description": "View dataset versions",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/dv_augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"dataview": {
"$ref": "#/definitions/dataview",
"description": "Dataview specification",
}
},
"required": ["dataview"],
}
def __init__(self, dataview, **kwargs):
super(GetSnippetsQueryForDataviewRequest, self).__init__(**kwargs)
self.dataview = dataview
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
| GetSnippetsQueryForDataviewRequest |
python | conda__conda | conda/models/enums.py | {
"start": 371,
"end": 892
} | class ____(Enum):
x86 = "x86"
x86_64 = "x86_64"
# arm64 is for macOS and Windows
arm64 = "arm64"
armv6l = "armv6l"
armv7l = "armv7l"
# aarch64 is for Linux only
aarch64 = "aarch64"
ppc64 = "ppc64"
ppc64le = "ppc64le"
riscv64 = "riscv64"
s390x = "s390x"
wasm32 = "wasm32"
z = "z"
@classmethod
def from_sys(cls):
if sys.platform == "zos":
return cls["z"]
return cls[machine()]
def __json__(self):
return self.value
| Arch |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modeling_lfm2_moe.py | {
"start": 33625,
"end": 36749
} | class ____(Lfm2MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Lfm2MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Lfm2MoeForCausalLM
>>> model = Lfm2MoeForCausalLM.from_pretrained("meta-lfm2_moe/Lfm2Moe-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-lfm2_moe/Lfm2Moe-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["Lfm2MoeForCausalLM", "Lfm2MoeModel", "Lfm2MoePreTrainedModel"]
| Lfm2MoeForCausalLM |
python | tiangolo__fastapi | docs_src/response_model/tutorial001_01_py39.py | {
"start": 104,
"end": 507
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: list[str] = []
@app.post("/items/")
async def create_item(item: Item) -> Item:
return item
@app.get("/items/")
async def read_items() -> list[Item]:
return [
Item(name="Portal Gun", price=42.0),
Item(name="Plumbus", price=32.0),
]
| Item |
python | allegroai__clearml | clearml/automation/aws_driver.py | {
"start": 577,
"end": 8225
} | class ____(CloudDriver):
"""AWS Driver"""
aws_access_key_id = attr.ib(validator=instance_of(str), default="")
aws_secret_access_key = attr.ib(validator=instance_of(str), default="")
aws_session_token = attr.ib(validator=instance_of(str), default="")
aws_region = attr.ib(validator=instance_of(str), default="")
use_credentials_chain = attr.ib(validator=instance_of(bool), default=False)
use_iam_instance_profile = attr.ib(validator=instance_of(bool), default=False)
iam_arn = attr.ib(validator=instance_of(str), default="")
iam_name = attr.ib(validator=instance_of(str), default="")
@classmethod
def from_config(cls, config: ConfigTree) -> "AWSDriver":
obj = super().from_config(config)
obj.aws_access_key_id = config["hyper_params"].get("cloud_credentials_key")
obj.aws_secret_access_key = config["hyper_params"].get("cloud_credentials_secret")
obj.aws_session_token = config["hyper_params"].get("cloud_credentials_token")
obj.aws_region = config["hyper_params"].get("cloud_credentials_region")
obj.use_credentials_chain = config["hyper_params"].get("use_credentials_chain", False)
obj.use_iam_instance_profile = config["hyper_params"].get("use_iam_instance_profile", False)
obj.iam_arn = config["hyper_params"].get("iam_arn")
obj.iam_name = config["hyper_params"].get("iam_name")
return obj
def __attrs_post_init__(self) -> None:
super().__attrs_post_init__()
self.tags = parse_tags(self.tags)
def spin_up_worker(
self,
resource_conf: dict,
worker_prefix: str,
queue_name: str,
task_id: str,
) -> str:
# user_data script will automatically run when the instance is started. it will install the required packages
# for clearml-agent, configure it using environment variables and run clearml-agent on the required queue
# Config reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2/client/run_instances.html
user_data = self.gen_user_data(worker_prefix, queue_name, task_id, resource_conf.get("cpu_only", False))
ec2 = boto3.client("ec2", **self.creds())
launch_specification = ConfigFactory.from_dict(
{
"ImageId": resource_conf["ami_id"],
"Monitoring": {"Enabled": bool(resource_conf.get("enable_monitoring", False))},
"InstanceType": resource_conf["instance_type"],
}
)
# handle EBS volumes (existing or new)
# Ref: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
if resource_conf.get("ebs_snapshot_id") and resource_conf.get("ebs_device_name"):
launch_specification["BlockDeviceMappings"] = [
{
"DeviceName": resource_conf["ebs_device_name"],
"Ebs": {"SnapshotId": resource_conf["ebs_snapshot_id"]},
}
]
elif resource_conf.get("ebs_device_name"):
launch_specification["BlockDeviceMappings"] = [
{
"DeviceName": resource_conf["ebs_device_name"],
"Ebs": {
"VolumeSize": resource_conf.get("ebs_volume_size", 80),
"VolumeType": resource_conf.get("ebs_volume_type", "gp3"),
},
}
]
if resource_conf.get("subnet_id", None):
launch_specification["SubnetId"] = resource_conf["subnet_id"]
elif resource_conf.get("availability_zone", None):
launch_specification["Placement"] = {"AvailabilityZone": resource_conf["availability_zone"]}
else:
raise Exception("subnet_id or availability_zone must to be specified in the config")
if resource_conf.get("key_name", None):
launch_specification["KeyName"] = resource_conf["key_name"]
if resource_conf.get("security_group_ids", None):
launch_specification["SecurityGroupIds"] = resource_conf["security_group_ids"]
# Adding iam role - you can have Arn OR Name, not both, Arn getting priority
if self.iam_arn:
launch_specification["IamInstanceProfile"] = {
"Arn": self.iam_arn,
}
elif self.iam_name:
launch_specification["IamInstanceProfile"] = {"Name": self.iam_name}
if resource_conf["is_spot"]:
# Create a request for a spot instance in AWS
encoded_user_data = base64.b64encode(user_data.encode("ascii")).decode("ascii")
launch_specification["UserData"] = encoded_user_data
ConfigTree.merge_configs(launch_specification, resource_conf.get("extra_configurations", {}))
instances = ec2.request_spot_instances(LaunchSpecification=launch_specification)
# Wait until spot request is fulfilled
request_id = instances["SpotInstanceRequests"][0]["SpotInstanceRequestId"]
waiter = ec2.get_waiter("spot_instance_request_fulfilled")
waiter.wait(SpotInstanceRequestIds=[request_id])
# Get the instance object for later use
response = ec2.describe_spot_instance_requests(SpotInstanceRequestIds=[request_id])
instance_id = response["SpotInstanceRequests"][0]["InstanceId"]
else:
# Create a new EC2 instance
launch_specification.update(
MinCount=1,
MaxCount=1,
UserData=user_data,
InstanceInitiatedShutdownBehavior="terminate",
)
ConfigTree.merge_configs(launch_specification, resource_conf.get("extra_configurations", {}))
instances = ec2.run_instances(**launch_specification)
# Get the instance object for later use
instance_id = instances["Instances"][0]["InstanceId"]
instance = boto3.resource("ec2", **self.creds()).Instance(instance_id)
if resource_conf.get("tags"):
instance.create_tags(
Resources=[instance_id],
Tags=[{"Key": key, "Value": val} for key, val in parse_tags(resource_conf.get("tags"))],
)
# Wait until instance is in running state
instance.wait_until_running()
return instance_id
def spin_down_worker(self, instance_id: str) -> None:
instance = boto3.resource("ec2", **self.creds()).Instance(instance_id)
instance.terminate()
def creds(self) -> Dict[str, Optional[str]]:
creds = {
"region_name": self.aws_region or None,
}
if not self.use_credentials_chain:
creds.update(
{
"aws_secret_access_key": self.aws_secret_access_key or None,
"aws_access_key_id": self.aws_access_key_id or None,
"aws_session_token": self.aws_session_token or None,
}
)
return creds
def instance_id_command(self) -> str:
return "curl http://169.254.169.254/latest/meta-data/instance-id"
def instance_type_key(self) -> str:
return "instance_type"
def kind(self) -> str:
return "AWS"
def console_log(self, instance_id: str) -> str:
ec2 = boto3.client("ec2", **self.creds())
try:
out = ec2.get_console_output(InstanceId=instance_id)
return out.get("Output", "")
except ClientError as err:
return "error: cannot get logs for {}:\n{}".format(instance_id, err)
| AWSDriver |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 32264,
"end": 33749
} | class ____(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix, use_abspath=True):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix, use_abspath=use_abspath)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
| Cache |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py | {
"start": 382,
"end": 1444
} | class ____(PackageBase):
"""Specialized class for packages built using GNU Autotools."""
build_system_class = "AutotoolsPackage"
default_buildsystem = "autotools"
build_system("autotools")
depends_on("gmake", type="build", when="build_system=autotools")
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass compiler flags to configure."""
# Has to be dynamic attribute due to caching.
configure_flag_args = []
for flag, values in flags.items():
if values:
var_name = "LIBS" if flag == "ldlibs" else flag.upper()
configure_flag_args.append(f"{var_name}={' '.join(values)}")
# Spack's fflags are meant for both F77 and FC, therefore we additionally set FCFLAGS
values = flags.get("fflags", None)
if values:
configure_flag_args.append(f"FCFLAGS={' '.join(values)}")
setattr(self, "configure_flag_args", configure_flag_args)
@register_builder("autotools")
| AutotoolsPackage |
python | paramiko__paramiko | tests/test_client.py | {
"start": 28953,
"end": 30974
} | class ____(ClientTest):
# TODO: most of these could reasonably be set up to use mocks/assertions
# (e.g. "gave passphrase -> expect PKey was given it as the passphrase")
# instead of suffering a real connection cycle.
# TODO: in that case, move the below to be part of an integration suite?
@requires_sha1_signing
def test_password_kwarg_works_for_password_auth(self):
# Straightforward / duplicate of earlier basic password test.
self._test_connection(password="pygmalion")
# TODO: more granular exception pending #387; should be signaling "no auth
# methods available" because no key and no password
@raises(SSHException)
@requires_sha1_signing
def test_passphrase_kwarg_not_used_for_password_auth(self):
# Using the "right" password in the "wrong" field shouldn't work.
self._test_connection(passphrase="pygmalion")
@requires_sha1_signing
def test_passphrase_kwarg_used_for_key_passphrase(self):
# Straightforward again, with new passphrase kwarg.
self._test_connection(
key_filename=_support("test_rsa_password.key"),
passphrase="television",
)
@requires_sha1_signing
def test_password_kwarg_used_for_passphrase_when_no_passphrase_kwarg_given(
self,
): # noqa
# Backwards compatibility: passphrase in the password field.
self._test_connection(
key_filename=_support("test_rsa_password.key"),
password="television",
)
@raises(AuthenticationException) # TODO: more granular
@requires_sha1_signing
def test_password_kwarg_not_used_for_passphrase_when_passphrase_kwarg_given( # noqa
self,
):
# Sanity: if we're given both fields, the password field is NOT used as
# a passphrase.
self._test_connection(
key_filename=_support("test_rsa_password.key"),
password="television",
passphrase="wat? lol no",
)
| PasswordPassphraseTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 133230,
"end": 133997
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"pull_request_id",
"user_ids",
"team_ids",
"union",
"client_mutation_id",
)
pull_request_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="pullRequestId"
)
user_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="userIds"
)
team_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="teamIds"
)
union = sgqlc.types.Field(Boolean, graphql_name="union")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| RequestReviewsInput |
python | allegroai__clearml | clearml/backend_api/services/v2_13/queues.py | {
"start": 20049,
"end": 21437
} | class ____(Request):
"""
Adds a task entry to the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "add_task"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(AddTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| AddTaskRequest |
python | pypa__setuptools | setuptools/discovery.py | {
"start": 7732,
"end": 8603
} | class ____(ModuleFinder):
DEFAULT_EXCLUDE = (
"setup",
"conftest",
"test",
"tests",
"example",
"examples",
"build",
# ---- Task runners ----
"toxfile",
"noxfile",
"pavement",
"dodo",
"tasks",
"fabfile",
# ---- Other tools ----
"[Ss][Cc]onstruct", # SCons
"conanfile", # Connan: C/C++ build tool
"manage", # Django
"benchmark",
"benchmarks",
"exercise",
"exercises",
# ---- Hidden files/Private modules ----
"[._]*",
)
"""Reserved top-level module names"""
def _find_packages_within(root_pkg: str, pkg_dir: StrPath) -> list[str]:
nested = PEP420PackageFinder.find(pkg_dir)
return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
| FlatLayoutModuleFinder |
python | pandas-dev__pandas | pandas/tests/computation/test_eval.py | {
"start": 40216,
"end": 59636
} | class ____:
def eval(self, *args, **kwargs):
kwargs["level"] = kwargs.pop("level", 0) + 1
return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self, engine, parser):
exclude_arith = []
if parser == "python":
exclude_arith = ["in", "not in"]
arith_ops = [
op
for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
if op not in exclude_arith
]
ops = (op for op in arith_ops if op != "//")
for op in ops:
ex = f"1 {op} 1"
ex2 = f"x {op} 1"
ex3 = f"1 {op} (x + 1)"
if op in ("in", "not in"):
msg = "argument of type 'int' is not iterable"
with pytest.raises(TypeError, match=msg):
pd.eval(ex, engine=engine, parser=parser)
else:
expec = _eval_single_bin(1, op, 1, engine)
x = self.eval(ex, engine=engine, parser=parser)
assert x == expec
expec = _eval_single_bin(x, op, 1, engine)
y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser)
assert y == expec
expec = _eval_single_bin(1, op, x + 1, engine)
y = self.eval(ex3, local_dict={"x": x}, engine=engine, parser=parser)
assert y == expec
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_simple_bool_ops(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
if parser == "python" and op in ["and", "or"]:
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.eval(ex)
return
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_bool_ops_with_constants(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
if parser == "python" and op in ["and", "or"]:
msg = "'BoolOp' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
self.eval(ex)
return
res = self.eval(ex)
exp = eval(ex)
assert res == exp
def test_4d_ndarray_fails(self):
x = np.random.default_rng(2).standard_normal((3, 4, 5, 6))
y = Series(np.random.default_rng(2).standard_normal(10))
msg = "N-dimensional objects, where N > 2, are not supported with eval"
with pytest.raises(NotImplementedError, match=msg):
self.eval("x + y", local_dict={"x": x, "y": y})
def test_constant(self):
x = self.eval("1")
assert x == 1
def test_single_variable(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
df2 = self.eval("df", local_dict={"df": df})
tm.assert_frame_equal(df, df2)
def test_failing_subscript_with_name_error(self):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841
with pytest.raises(NameError, match="name 'x' is not defined"):
self.eval("df[x > 2] > 2")
def test_lhs_expression_subscript(self):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
expected = (df + 1)[df > 2]
tm.assert_frame_equal(result, expected)
def test_attr_expression(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
)
expr1 = "df.a < df.b"
expec1 = df.a < df.b
expr2 = "df.a + df.b + df.c"
expec2 = df.a + df.b + df.c
expr3 = "df.a + df.b + df.c[df.b < 0]"
expec3 = df.a + df.b + df.c[df.b < 0]
exprs = expr1, expr2, expr3
expecs = expec1, expec2, expec3
for e, expec in zip(exprs, expecs):
tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df}))
def test_assignment_fails(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
)
df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
expr1 = "df = df2"
msg = "cannot assign without a target object"
with pytest.raises(ValueError, match=msg):
self.eval(expr1, local_dict={"df": df, "df2": df2})
def test_assignment_column_multiple_raise(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# multiple assignees
with pytest.raises(SyntaxError, match="invalid syntax"):
df.eval("d c = a + b")
def test_assignment_column_invalid_assign(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# invalid assignees
msg = "left hand side of an assignment must be a single name"
with pytest.raises(SyntaxError, match=msg):
df.eval("d,c = a + b")
def test_assignment_column_invalid_assign_function_call(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
msg = "cannot assign to function call"
with pytest.raises(SyntaxError, match=msg):
df.eval('Timestamp("20131001") = a + b')
def test_assignment_single_assign_existing(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# single assignment - existing variable
expected = df.copy()
expected["a"] = expected["a"] + expected["b"]
df.eval("a = a + b", inplace=True)
tm.assert_frame_equal(df, expected)
def test_assignment_single_assign_new(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# single assignment - new variable
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
df.eval("c = a + b", inplace=True)
tm.assert_frame_equal(df, expected)
def test_assignment_single_assign_local_overlap(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
df = df.copy()
a = 1 # noqa: F841
df.eval("a = 1 + b", inplace=True)
expected = df.copy()
expected["a"] = 1 + expected["b"]
tm.assert_frame_equal(df, expected)
def test_assignment_single_assign_name(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
a = 1 # noqa: F841
old_a = df.a.copy()
df.eval("a = a + b", inplace=True)
result = old_a + df.b
tm.assert_series_equal(result, df.a, check_names=False)
assert result.name is None
def test_assignment_multiple_raises(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# multiple assignment
df.eval("c = a + b", inplace=True)
msg = "can only assign a single expression"
with pytest.raises(SyntaxError, match=msg):
df.eval("c = a = b")
def test_assignment_explicit(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
# explicit targets
self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True)
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
tm.assert_frame_equal(df, expected)
def test_column_in(self, engine):
# GH 11235
df = DataFrame({"a": [11], "b": [-32]})
result = df.eval("a in [11, -32]", engine=engine)
expected = Series([True], name="a")
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.")
def test_assignment_not_inplace(self):
# see gh-9297
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab")
)
actual = df.eval("c = a + b", inplace=False)
assert actual is not None
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
tm.assert_frame_equal(df, expected)
def test_multi_line_expression(self):
# GH 11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
expected["d"] = expected["c"] + expected["b"]
answer = df.eval(
"""
c = a + b
d = c + b""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert answer is None
expected["a"] = expected["a"] - 1
expected["e"] = expected["a"] + 2
answer = df.eval(
"""
a = a - 1
e = a + 2""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert answer is None
# multi-line not valid if not all assignments
msg = "Multi-line expressions are only valid if all expressions contain"
with pytest.raises(ValueError, match=msg):
df.eval(
"""
a = b + 2
b - 2""",
inplace=False,
)
def test_multi_line_expression_not_inplace(self):
# GH 11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected["c"] = expected["a"] + expected["b"]
expected["d"] = expected["c"] + expected["b"]
df = df.eval(
"""
c = a + b
d = c + b""",
inplace=False,
)
tm.assert_frame_equal(expected, df)
expected["a"] = expected["a"] - 1
expected["e"] = expected["a"] + 2
df = df.eval(
"""
a = a - 1
e = a + 2""",
inplace=False,
)
tm.assert_frame_equal(expected, df)
def test_multi_line_expression_local_variable(self):
# GH 15342
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
local_var = 7
expected["c"] = expected["a"] * local_var
expected["d"] = expected["c"] + local_var
answer = df.eval(
"""
c = a * @local_var
d = c + @local_var
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert answer is None
def test_multi_line_expression_callable_local_variable(self):
# 26426
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
def local_func(a, b):
return b
expected = df.copy()
expected["c"] = expected["a"] * local_func(1, 7)
expected["d"] = expected["c"] + local_func(1, 7)
answer = df.eval(
"""
c = a * @local_func(1, 7)
d = c + @local_func(1, 7)
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert answer is None
def test_multi_line_expression_callable_local_variable_with_kwargs(self):
# 26426
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
def local_func(a, b):
return b
expected = df.copy()
expected["c"] = expected["a"] * local_func(b=7, a=1)
expected["d"] = expected["c"] + local_func(b=7, a=1)
answer = df.eval(
"""
c = a * @local_func(b=7, a=1)
d = c + @local_func(b=7, a=1)
""",
inplace=True,
)
tm.assert_frame_equal(expected, df)
assert answer is None
def test_assignment_in_query(self):
# GH 8664
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df_orig = df.copy()
msg = "cannot assign without a target object"
with pytest.raises(ValueError, match=msg):
df.query("a = 1")
tm.assert_frame_equal(df, df_orig)
def test_query_inplace(self):
# see gh-11149
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
expected = df.copy()
expected = expected[expected["a"] == 2]
df.query("a == 2", inplace=True)
tm.assert_frame_equal(expected, df)
df = {}
expected = {"a": 3}
self.eval("a = 1 + 2", target=df, inplace=True)
tm.assert_dict_equal(df, expected)
@pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)])
def test_cannot_item_assign(self, invalid_target):
msg = "Cannot assign expression output to target"
expression = "a = 1 + 2"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=True)
if hasattr(invalid_target, "copy"):
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=False)
@pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)])
def test_cannot_copy_item(self, invalid_target):
msg = "Cannot return a copy of the target"
expression = "a = 1 + 2"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=invalid_target, inplace=False)
@pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}])
def test_inplace_no_assignment(self, target):
expression = "1 + 2"
assert self.eval(expression, target=target, inplace=False) == 3
msg = "Cannot operate inplace if there is no assignment"
with pytest.raises(ValueError, match=msg):
self.eval(expression, target=target, inplace=True)
def test_basic_period_index_boolean_expression(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((2, 2)),
columns=period_range("2020-01-01", freq="D", periods=2),
)
e = df < 2
r = self.eval("df < 2", local_dict={"df": df})
x = df < 2
tm.assert_frame_equal(r, e)
tm.assert_frame_equal(x, e)
def test_basic_period_index_subscript_expression(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((2, 2)),
columns=period_range("2020-01-01", freq="D", periods=2),
)
r = self.eval("df[df < 2 + 3]", local_dict={"df": df})
e = df[df < 2 + 3]
tm.assert_frame_equal(r, e)
def test_nested_period_index_subscript_expression(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((2, 2)),
columns=period_range("2020-01-01", freq="D", periods=2),
)
r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df})
e = df[df[df < 2] < 2] + df * 2
tm.assert_frame_equal(r, e)
def test_date_boolean(self, engine, parser):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
df["dates1"] = date_range("1/1/2012", periods=5)
res = self.eval(
"df.dates1 < 20130101",
local_dict={"df": df},
engine=engine,
parser=parser,
)
expec = df.dates1 < "20130101"
tm.assert_series_equal(res, expec)
def test_simple_in_ops(self, engine, parser):
if parser != "python":
res = pd.eval("1 in [1, 2]", engine=engine, parser=parser)
assert res
res = pd.eval("2 in (1, 2)", engine=engine, parser=parser)
assert res
res = pd.eval("3 in (1, 2)", engine=engine, parser=parser)
assert not res
res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
assert res
res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser)
assert res
res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser)
assert res
res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser)
assert res
res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser)
assert res
res = pd.eval("(3,) not in [(3,), 2]", engine=engine, parser=parser)
assert not res
res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser)
assert res
else:
msg = "'In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval("1 in [1, 2]", engine=engine, parser=parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("2 in (1, 2)", engine=engine, parser=parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("3 in (1, 2)", engine=engine, parser=parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser)
msg = "'NotIn' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval("3 not in (1, 2)", engine=engine, parser=parser)
with pytest.raises(NotImplementedError, match=msg):
pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser)
def test_check_many_exprs(self, engine, parser):
a = 1 # noqa: F841
expr = " * ".join("a" * 33)
expected = 1
res = pd.eval(expr, engine=engine, parser=parser)
assert res == expected
@pytest.mark.parametrize(
"expr",
[
"df > 2 and df > 3",
"df > 2 or df > 3",
"not df > 2",
],
)
def test_fails_and_or_not(self, expr, engine, parser):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
if parser == "python":
msg = "'BoolOp' nodes are not implemented"
if "not" in expr:
msg = "'Not' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(
expr,
local_dict={"df": df},
parser=parser,
engine=engine,
)
else:
# smoke-test, should not raise
pd.eval(
expr,
local_dict={"df": df},
parser=parser,
engine=engine,
)
@pytest.mark.parametrize("char", ["|", "&"])
def test_fails_ampersand_pipe(self, char, engine, parser):
df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841
ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)"
if parser == "python":
msg = "cannot evaluate scalar only bool ops"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, parser=parser, engine=engine)
else:
# smoke-test, should not raise
pd.eval(ex, parser=parser, engine=engine)
| TestOperations |
python | PyCQA__pylint | tests/functional/a/access/access_to_protected_members.py | {
"start": 3014,
"end": 3141
} | class ____:
"""Test for GitHub issue 1159"""
_foo = 0
def __init__(self):
self._bar = 0
| Issue1159OtherClass |
python | pallets__jinja | tests/test_loader.py | {
"start": 4525,
"end": 7448
} | class ____:
searchpath = (Path(__file__) / ".." / "res" / "templates").resolve()
@staticmethod
def _test_common(env):
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
tmpl = env.get_template("foo/test.html")
assert tmpl.render().strip() == "FOO"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
def test_searchpath_as_str(self):
filesystem_loader = loaders.FileSystemLoader(str(self.searchpath))
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_searchpath_as_list_including_pathlib(self):
filesystem_loader = loaders.FileSystemLoader(
["/tmp/templates", self.searchpath]
)
env = Environment(loader=filesystem_loader)
self._test_common(env)
def test_caches_template_based_on_mtime(self):
filesystem_loader = loaders.FileSystemLoader(self.searchpath)
env = Environment(loader=filesystem_loader)
tmpl1 = env.get_template("test.html")
tmpl2 = env.get_template("test.html")
assert tmpl1 is tmpl2
os.utime(self.searchpath / "test.html", (time.time(), time.time()))
tmpl3 = env.get_template("test.html")
assert tmpl1 is not tmpl3
@pytest.mark.parametrize(
("encoding", "expect"),
[
("utf-8", "文字化け"),
("iso-8859-1", "æ\x96\x87\xe5\xad\x97\xe5\x8c\x96\xe3\x81\x91"),
],
)
def test_uses_specified_encoding(self, encoding, expect):
loader = loaders.FileSystemLoader(self.searchpath, encoding=encoding)
e = Environment(loader=loader)
t = e.get_template("mojibake.txt")
assert t.render() == expect
def test_filename_normpath(self):
"""Nested template names should only contain ``os.sep`` in the
loaded filename.
"""
loader = loaders.FileSystemLoader(self.searchpath)
e = Environment(loader=loader)
t = e.get_template("foo/test.html")
assert t.filename == str(self.searchpath / "foo" / "test.html")
def test_error_includes_paths(self, env, filesystem_loader):
env.loader = filesystem_loader
with pytest.raises(TemplateNotFound) as info:
env.get_template("missing")
e_str = str(info.value)
assert e_str.startswith("'missing' not found in search path: ")
filesystem_loader.searchpath.append("other")
with pytest.raises(TemplateNotFound) as info:
env.get_template("missing")
e_str = str(info.value)
assert e_str.startswith("'missing' not found in search paths: ")
assert ", 'other'" in e_str
| TestFileSystemLoader |
python | getsentry__sentry | src/sentry/sentry_apps/token_exchange/manual_refresher.py | {
"start": 1080,
"end": 6042
} | class ____:
"""
Refreshes an installation's token after validation (i.e client_secret_jwt or client_credentials)
Currently meant for 3rd party integrations to manually refresh their tokens.
"""
install: SentryAppInstallation
client_id: str
user: User
def run(self) -> ApiToken:
with SentryAppInteractionEvent(
operation_type=SentryAppInteractionType.AUTHORIZATIONS,
event_type=SentryAppEventType.MANUAL_REFRESHER,
).capture() as lifecycle:
context = {
"installation_uuid": self.install.uuid,
"client_id": self.application.client_id[:SENSITIVE_CHARACTER_LIMIT],
"sentry_app_id": self.install.sentry_app.id,
}
lifecycle.add_extras(context)
try:
token = None
with transaction.atomic(router.db_for_write(ApiToken)):
self._validate()
self._delete_existing_token()
token = self._create_new_token()
self._record_token_exchange()
return token
except (OutboxDatabaseError, OutboxFlushError) as e:
if token is not None:
logger.warning(
"manual_refresher.outbox-failure",
extra=context,
exc_info=e,
)
self._record_token_exchange()
return token
raise SentryAppSentryError(
message="Failed to refresh given token",
status_code=500,
webhook_context=context,
) from e
except SentryAppIntegratorError as e:
lifecycle.record_halt(halt_reason=e)
raise
def _delete_existing_token(self) -> None:
# An installation must have a token to be refreshed
# Lack of token indicates the api grant hasn't been exchanged yet
if self.installation.api_token is None:
raise SentryAppIntegratorError(
message="Installation does not have a token",
status_code=401,
webhook_context={"installation_uuid": self.install.uuid},
)
self.installation.api_token.delete()
def _record_token_exchange(self) -> None:
analytics.record(
SentryAppTokenExchangedEvent(
sentry_app_installation_id=self.install.id,
exchange_type="manual_refresh",
)
)
logger.info(
"manual_refresher.token_exchange",
extra={
"installation_uuid": self.install.uuid,
"sentry_app_id": self.install.sentry_app.id,
},
)
def _validate(self) -> None:
Validator(install=self.installation, client_id=self.client_id, user=self.user).run()
def _create_new_token(self) -> ApiToken:
token = ApiToken.objects.create(
user=self.user,
application=self.application,
scope_list=self.sentry_app.scope_list,
expires_at=token_expiration(),
)
try:
SentryAppInstallation.objects.get(id=self.install.id).update(api_token=token)
except SentryAppInstallation.DoesNotExist:
pass
return token
@cached_property
def installation(self) -> SentryAppInstallation:
try:
return SentryAppInstallation.objects.get(id=self.install.id)
except SentryAppInstallation.DoesNotExist:
raise SentryAppIntegratorError(
message="Installation not found",
status_code=404,
webhook_context={"installation_uuid": self.install.uuid},
)
@cached_property
def application(self) -> ApiApplication:
try:
return ApiApplication.objects.get(client_id=self.client_id)
except ApiApplication.DoesNotExist:
raise SentryAppSentryError(
message="Could not find matching Application for given client_id",
status_code=401,
webhook_context={
"client_id": self.client_id,
"installation_uuid": self.install.uuid,
},
)
@property
def sentry_app(self) -> SentryApp:
try:
return self.application.sentry_app
except SentryApp.DoesNotExist:
raise SentryAppSentryError(
message="Sentry App does not exist on attached Application",
status_code=401,
webhook_context={
"application_id": self.application.id,
"installation_uuid": self.install.uuid,
"client_id": self.application.client_id[:SENSITIVE_CHARACTER_LIMIT],
},
)
| ManualTokenRefresher |
python | giampaolo__psutil | tests/test_posix.py | {
"start": 3351,
"end": 10358
} | class ____(PsutilTestCase):
"""Compare psutil results against 'ps' command line utility (mainly)."""
@classmethod
def setUpClass(cls):
cls.pid = spawn_subproc(
[PYTHON_EXE, "-E", "-O"], stdin=subprocess.PIPE
).pid
@classmethod
def tearDownClass(cls):
terminate(cls.pid)
def test_ppid(self):
ppid_ps = ps('ppid', self.pid)
ppid_psutil = psutil.Process(self.pid).ppid()
assert ppid_ps == ppid_psutil
def test_uid(self):
uid_ps = ps('uid', self.pid)
uid_psutil = psutil.Process(self.pid).uids().real
assert uid_ps == uid_psutil
def test_gid(self):
gid_ps = ps('rgid', self.pid)
gid_psutil = psutil.Process(self.pid).gids().real
assert gid_ps == gid_psutil
def test_username(self):
username_ps = ps('user', self.pid)
username_psutil = psutil.Process(self.pid).username()
assert username_ps == username_psutil
def test_username_no_resolution(self):
# Emulate a case where the system can't resolve the uid to
# a username in which case psutil is supposed to return
# the stringified uid.
p = psutil.Process()
with mock.patch("psutil.pwd.getpwuid", side_effect=KeyError) as fun:
assert p.username() == str(p.uids().real)
assert fun.called
@skip_on_access_denied()
@retry_on_failure()
def test_rss_memory(self):
# give python interpreter some time to properly initialize
# so that the results are the same
time.sleep(0.1)
rss_ps = ps_rss(self.pid)
rss_psutil = psutil.Process(self.pid).memory_info()[0] / 1024
assert rss_ps == rss_psutil
@skip_on_access_denied()
@retry_on_failure()
def test_vsz_memory(self):
# give python interpreter some time to properly initialize
# so that the results are the same
time.sleep(0.1)
vsz_ps = ps_vsz(self.pid)
vsz_psutil = psutil.Process(self.pid).memory_info()[1] / 1024
assert vsz_ps == vsz_psutil
def test_name(self):
name_ps = ps_name(self.pid)
# remove path if there is any, from the command
name_ps = os.path.basename(name_ps).lower()
name_psutil = psutil.Process(self.pid).name().lower()
# ...because of how we calculate PYTHON_EXE; on MACOS this may
# be "pythonX.Y".
name_ps = re.sub(r"\d.\d", "", name_ps)
name_psutil = re.sub(r"\d.\d", "", name_psutil)
# ...may also be "python.X"
name_ps = re.sub(r"\d", "", name_ps)
name_psutil = re.sub(r"\d", "", name_psutil)
assert name_ps == name_psutil
def test_name_long(self):
# On UNIX the kernel truncates the name to the first 15
# characters. In such a case psutil tries to determine the
# full name from the cmdline.
name = "long-program-name"
cmdline = ["long-program-name-extended", "foo", "bar"]
with mock.patch("psutil._psplatform.Process.name", return_value=name):
with mock.patch(
"psutil._psplatform.Process.cmdline", return_value=cmdline
):
p = psutil.Process()
assert p.name() == "long-program-name-extended"
def test_name_long_cmdline_ad_exc(self):
# Same as above but emulates a case where cmdline() raises
# AccessDenied in which case psutil is supposed to return
# the truncated name instead of crashing.
name = "long-program-name"
with mock.patch("psutil._psplatform.Process.name", return_value=name):
with mock.patch(
"psutil._psplatform.Process.cmdline",
side_effect=psutil.AccessDenied(0, ""),
):
p = psutil.Process()
assert p.name() == "long-program-name"
def test_name_long_cmdline_nsp_exc(self):
# Same as above but emulates a case where cmdline() raises NSP
# which is supposed to propagate.
name = "long-program-name"
with mock.patch("psutil._psplatform.Process.name", return_value=name):
with mock.patch(
"psutil._psplatform.Process.cmdline",
side_effect=psutil.NoSuchProcess(0, ""),
):
p = psutil.Process()
with pytest.raises(psutil.NoSuchProcess):
p.name()
@pytest.mark.skipif(MACOS or BSD, reason="ps -o start not available")
def test_create_time(self):
time_ps = ps('start', self.pid)
time_psutil = psutil.Process(self.pid).create_time()
time_psutil_tstamp = datetime.datetime.fromtimestamp(
time_psutil
).strftime("%H:%M:%S")
# sometimes ps shows the time rounded up instead of down, so we check
# for both possible values
round_time_psutil = round(time_psutil)
round_time_psutil_tstamp = datetime.datetime.fromtimestamp(
round_time_psutil
).strftime("%H:%M:%S")
assert time_ps in {time_psutil_tstamp, round_time_psutil_tstamp}
def test_exe(self):
ps_pathname = ps_name(self.pid)
psutil_pathname = psutil.Process(self.pid).exe()
try:
assert ps_pathname == psutil_pathname
except AssertionError:
# certain platforms such as BSD are more accurate returning:
# "/usr/local/bin/python3.7"
# ...instead of:
# "/usr/local/bin/python"
# We do not want to consider this difference in accuracy
# an error.
adjusted_ps_pathname = ps_pathname[: len(ps_pathname)]
assert ps_pathname == adjusted_ps_pathname
# On macOS the official python installer exposes a python wrapper that
# executes a python executable hidden inside an application bundle inside
# the Python framework.
# There's a race condition between the ps call & the psutil call below
# depending on the completion of the execve call so let's retry on failure
@retry_on_failure()
def test_cmdline(self):
ps_cmdline = ps_args(self.pid)
psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline())
if AARCH64 and len(ps_cmdline) < len(psutil_cmdline):
assert psutil_cmdline.startswith(ps_cmdline)
else:
assert ps_cmdline == psutil_cmdline
# On SUNOS "ps" reads niceness /proc/pid/psinfo which returns an
# incorrect value (20); the real deal is getpriority(2) which
# returns 0; psutil relies on it, see:
# https://github.com/giampaolo/psutil/issues/1082
# AIX has the same issue
@pytest.mark.skipif(SUNOS, reason="not reliable on SUNOS")
@pytest.mark.skipif(AIX, reason="not reliable on AIX")
def test_nice(self):
ps_nice = ps('nice', self.pid)
psutil_nice = psutil.Process().nice()
assert ps_nice == psutil_nice
@pytest.mark.skipif(not POSIX, reason="POSIX only")
| TestProcess |
python | getsentry__sentry | tests/sentry/utils/test_eventuser.py | {
"start": 717,
"end": 20722
} | class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project(date_added=(timezone.now() - timedelta(hours=2)))
self.event_1 = self.store_event(
data={
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foobar",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=self.project.id,
)
self.event_2 = self.store_event(
data={
"user": {
"id": 2,
"email": "nisanthan@sentry.io",
"username": "nisanthan",
"ip_address": "1.1.1.1",
},
"timestamp": before_now(seconds=20).isoformat(),
},
project_id=self.project.id,
)
self.event_3 = self.store_event(
data={
"user": {
"id": "myminion",
"email": "minion@universal.com",
"username": "minion",
"ip_address": "8.8.8.8",
},
"timestamp": before_now(seconds=30).isoformat(),
},
project_id=self.project.id,
)
@mock.patch("sentry.analytics.record")
def test_for_projects_query_filter_id(self, mock_record: mock.MagicMock) -> None:
with mock.patch("time.time") as mock_time:
mock_time.return_value = 0
euser = EventUser.for_projects([self.project], {"id": ["2"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident == self.event_2.data.get("user").get("id")
assert euser[0].email == self.event_2.data.get("user").get("email")
assert_analytics_events_recorded(
mock_record,
[
EventUserSnubaQuery(
project_ids=[self.project.id],
query=f"MATCH (events)\nSELECT project_id, ip_address_v6, ip_address_v4, user_id, user_name, "
f"user_email, max(timestamp) AS `latest_timestamp`\nBY project_id, ip_address_v6, "
f"ip_address_v4, user_id, user_name, user_email\n"
f"WHERE project_id IN array({self.project.id}) AND timestamp < toDateTime('{now.isoformat()}') "
f"AND timestamp >= toDateTime('{(now - timedelta(hours=2)).isoformat()}') "
f"AND user_id IN array('2')\n"
f"ORDER BY latest_timestamp DESC\n"
f"LIMIT 1\nOFFSET 0",
query_try=0,
count_rows_returned=1,
count_rows_filtered=0,
query_time_ms=0,
),
EventUserSnubaForProjects(
project_ids=[self.project.id],
total_tries=1,
total_rows_returned=1,
total_time_ms=0,
),
],
)
def test_for_projects_query_filter_username(self) -> None:
euser = EventUser.for_projects([self.project], {"username": ["minion"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident == self.event_3.data.get("user").get("id")
assert euser[0].email == self.event_3.data.get("user").get("email")
def test_for_projects_query_filter_email(self) -> None:
euser = EventUser.for_projects(
[self.project], {"email": ["foo@example.com"]}, result_limit=1
)
assert len(euser) == 1
assert euser[0].user_ident == self.event_1.data.get("user").get("id")
assert euser[0].email == self.event_1.data.get("user").get("email")
def test_for_projects_query_filter_ip(self) -> None:
euser = EventUser.for_projects([self.project], {"ip": ["8.8.8.8"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident == self.event_3.data.get("user").get("id")
assert euser[0].email == self.event_3.data.get("user").get("email")
def test_for_projects_query_multiple_OR_filters(self) -> None:
eusers = EventUser.for_projects(
[self.project],
{"username": ["minion"], "email": ["foo@example.com"]},
filter_boolean=BooleanOp.OR,
)
assert len(eusers) == 2
def test_for_projects_query_multiple_AND_filters(self) -> None:
eusers = EventUser.for_projects(
[self.project],
{"username": ["minion"], "email": ["minion@universal.com"], "ip": ["8.8.8.8"]},
)
assert len(eusers) == 1
assert eusers[0].user_ident == self.event_3.data.get("user").get("id")
assert eusers[0].email == self.event_3.data.get("user").get("email")
def test_for_projects_query_with_multiple_eventuser_entries_different_ips(self) -> None:
for i in range(10):
self.store_event(
data={
"user": {
"id": 2,
"email": "nisanthan@sentry.io",
"username": "nisanthan",
"ip_address": f"1.1.1.{i}",
},
"timestamp": before_now(seconds=30 + i).isoformat(),
},
project_id=self.project.id,
)
eusers = EventUser.for_projects(
[self.project],
{"username": ["nisanthan"]},
filter_boolean=BooleanOp.OR,
)
assert len(eusers) == 1
assert eusers[0].user_ident == self.event_2.data.get("user").get("id")
assert eusers[0].email == self.event_2.data.get("user").get("email")
assert eusers[0].ip_address == self.event_2.data.get("user").get("ip_address")
def test_for_projects_query_with_multiple_eventuser_entries_different_ips_query_by_ip(
self,
) -> None:
for i in range(5):
self.store_event(
data={
"user": {
"id": 2,
"email": "nisanthan@sentry.io",
"username": "nisanthan",
"ip_address": f"1.1.1.{i}",
},
"timestamp": before_now(seconds=30 + i).isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"user": {
"id": "myminion",
"email": "minion@universal.com",
"username": "minion",
"ip_address": f"8.8.8.{i}",
},
"timestamp": before_now(seconds=40 + i).isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"user": {
"id": "gru",
"email": "gru@universal.com",
"username": "gru",
"ip_address": f"2001:0db8:0000:85a3:0000:0000:ac1f:800{i}",
},
"timestamp": before_now(seconds=50 + i).isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"user": {
"id": "scarlet",
"email": "scarlet@universal.com",
"username": "scarlet",
"ip_address": f"2001:db8:0:85a3::ac1f:{i}008",
},
"timestamp": before_now(seconds=60 + i).isoformat(),
},
project_id=self.project.id,
)
eusers = EventUser.for_projects(
[self.project],
{
"ip": [
"2001:0db8:0000:85a3:0000:0000:ac1f:3008",
"2001:db8:0:85a3::ac1f:8001",
"8.8.8.4",
"1.1.1.2",
]
},
filter_boolean=BooleanOp.OR,
)
assert len(eusers) == 4
assert eusers[0].email == "nisanthan@sentry.io"
assert eusers[0].ip_address == "1.1.1.2"
assert eusers[1].email == "minion@universal.com"
assert eusers[1].ip_address == "8.8.8.4"
assert eusers[2].email == "gru@universal.com"
assert eusers[2].ip_address == "2001:db8:0:85a3::ac1f:8001"
assert eusers[3].email == "scarlet@universal.com"
assert eusers[3].ip_address == "2001:db8:0:85a3::ac1f:3008"
@mock.patch("sentry.analytics.record")
def test_for_projects_query_with_multiple_eventuser_entries_different_ips_query_by_username(
self, mock_record
):
for i in range(10):
self.store_event(
data={
"user": {
"id": 2,
"email": "nisanthan@sentry.io",
"username": "nisanthan",
"ip_address": f"1.1.1.{i}",
},
"timestamp": before_now(seconds=30 + i).isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"user": {
"id": "myminion",
"email": "minion@universal.com",
"username": "minion",
"ip_address": f"2001:0db8:0000:85a3:0000:0000:ac1f:800{i}",
},
"timestamp": before_now(seconds=40 + i).isoformat(),
},
project_id=self.project.id,
)
with mock.patch("time.time") as mock_time:
# Define the mock values for time.time()
mock_time.return_value = 1000
eusers = EventUser.for_projects(
[self.project],
{"username": ["nisanthan", "minion"]},
filter_boolean=BooleanOp.OR,
)
assert len(eusers) == 2
assert eusers[0].user_ident == self.event_2.data.get("user").get("id")
assert eusers[0].email == self.event_2.data.get("user").get("email")
assert eusers[0].ip_address == self.event_2.data.get("user").get("ip_address")
assert eusers[1].user_ident == self.event_3.data.get("user").get("id")
assert eusers[1].email == self.event_3.data.get("user").get("email")
assert eusers[1].ip_address == self.event_3.data.get("user").get("ip_address")
assert_analytics_events_recorded(
mock_record,
[
EventUserSnubaQuery(
project_ids=[self.project.id],
query=f"MATCH (events)\nSELECT project_id, ip_address_v6, ip_address_v4, user_id, user_name, "
f"user_email, max(timestamp) AS `latest_timestamp`\nBY project_id, ip_address_v6, "
f"ip_address_v4, user_id, user_name, user_email\n"
f"WHERE project_id IN array({self.project.id}) AND timestamp < toDateTime('{now.isoformat()}') "
f"AND timestamp >= toDateTime('{(now - timedelta(hours=2)).isoformat()}') "
f"AND user_name IN array('nisanthan', 'minion')\n"
f"ORDER BY latest_timestamp DESC",
query_try=0,
count_rows_returned=21,
count_rows_filtered=19,
query_time_ms=0,
),
EventUserSnubaForProjects(
project_ids=[self.project.id],
total_tries=1,
total_rows_returned=2,
total_time_ms=0,
),
],
)
    @mock.patch("sentry.utils.eventuser.OVERFETCH_FACTOR", new=2)
    @mock.patch("sentry.analytics.record")
    def test_for_projects_multiple_query(self, mock_record) -> None:
        """A second Snuba page is fetched when the first (overfetched) page
        dedupes down to fewer unique users than ``result_limit``; verify both
        recorded queries and the rollup analytics event."""
        id_1 = "test1"
        email_1 = "test@sentry.io"
        # Six events for one user: many rows, one unique EventUser.
        for i in range(6):
            self.store_event(
                data={
                    "user": {
                        "id": id_1,
                        "email": email_1,
                        "username": "test",
                        "ip_address": f"1.1.1.{i}",
                    },
                    "timestamp": before_now(seconds=30 + i).isoformat(),
                },
                project_id=self.project.id,
            )
        id_2 = "test2"
        email_2 = "test2@sentry.io"
        # Older events (IPv6 addresses) for a second user, surfacing on page two.
        for i in range(5, 8):
            self.store_event(
                data={
                    "user": {
                        "id": "test2",
                        "email": email_2,
                        "username": "test2",
                        "ip_address": f"2001:0db8:0000:85a3:0000:0000:ac1f:800{i}",
                    },
                    "timestamp": before_now(minutes=60 + i).isoformat(),
                },
                project_id=self.project.id,
            )
        with mock.patch("time.time") as mock_time:
            # Define the mock values for time.time()
            mock_time.return_value = 1000
            eusers = EventUser.for_projects(
                [self.project],
                {"username": ["test", "test2"]},
                filter_boolean=BooleanOp.OR,
                result_limit=2,
            )
            assert len(eusers) == 2
            assert eusers[0].user_ident == id_1
            assert eusers[0].email == email_1
            assert eusers[0].ip_address == "1.1.1.0"
            assert eusers[1].user_ident == id_2
            assert eusers[1].email == email_2
            # Note the IPv6 address is normalized on the way out.
            assert eusers[1].ip_address == "2001:db8:0:85a3::ac1f:8005"
            # OVERFETCH_FACTOR=2 with result_limit=2 yields LIMIT 5 per page;
            # the second query advances OFFSET by the page size.
            assert_analytics_events_recorded(
                mock_record,
                [
                    EventUserSnubaQuery(
                        project_ids=[self.project.id],
                        query=f"MATCH (events)\nSELECT project_id, ip_address_v6, ip_address_v4, user_id, user_name, "
                        f"user_email, max(timestamp) AS `latest_timestamp`\nBY project_id, ip_address_v6, "
                        f"ip_address_v4, user_id, user_name, user_email\n"
                        f"WHERE project_id IN array({self.project.id}) AND timestamp < toDateTime('{now.isoformat()}') "
                        f"AND timestamp >= toDateTime('{(now - timedelta(hours=2)).isoformat()}') "
                        f"AND user_name IN array('test', 'test2')\n"
                        f"ORDER BY latest_timestamp DESC\n"
                        f"LIMIT 5\nOFFSET 0",
                        query_try=0,
                        count_rows_returned=5,
                        count_rows_filtered=4,
                        query_time_ms=0,
                    ),
                    EventUserSnubaQuery(
                        project_ids=[self.project.id],
                        query=f"MATCH (events)\nSELECT project_id, ip_address_v6, ip_address_v4, user_id, user_name, "
                        f"user_email, max(timestamp) AS `latest_timestamp`\nBY project_id, ip_address_v6, "
                        f"ip_address_v4, user_id, user_name, user_email\n"
                        f"WHERE project_id IN array({self.project.id}) AND timestamp < toDateTime('{now.isoformat()}') "
                        f"AND timestamp >= toDateTime('{(now - timedelta(hours=2)).isoformat()}') "
                        f"AND user_name IN array('test', 'test2')\n"
                        f"ORDER BY latest_timestamp DESC\n"
                        f"LIMIT 5\nOFFSET 5",
                        query_try=1,
                        count_rows_returned=4,
                        count_rows_filtered=3,
                        query_time_ms=0,
                    ),
                    EventUserSnubaForProjects(
                        project_ids=[self.project.id],
                        total_tries=2,
                        total_rows_returned=2,
                        total_time_ms=0,
                    ),
                ],
            )
def test_tag_value_primary_is_user_ident(self) -> None:
euser = EventUser.for_projects([self.project], {"id": ["2"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident == "2"
assert euser[0].tag_value == "id:2"
def test_tag_value_primary_is_username(self) -> None:
self.store_event(
data={
"user": {
"id": None,
"email": "cocoa@universal.com",
"username": "cocoa",
"ip_address": "8.8.8.8",
},
"timestamp": before_now(seconds=30).isoformat(),
},
project_id=self.project.id,
)
euser = EventUser.for_projects([self.project], {"username": ["cocoa"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident is None
assert euser[0].tag_value == "username:cocoa"
def test_tag_value_primary_is_email(self) -> None:
self.store_event(
data={
"user": {
"id": None,
"email": "cocoa@universal.com",
"username": None,
"ip_address": "8.8.8.8",
},
"timestamp": before_now(seconds=30).isoformat(),
},
project_id=self.project.id,
)
euser = EventUser.for_projects(
[self.project], {"email": ["cocoa@universal.com"]}, result_limit=1
)
assert len(euser) == 1
assert euser[0].user_ident is None
assert euser[0].username is None
assert euser[0].tag_value == "email:cocoa@universal.com"
def test_tag_value_primary_is_ip(self) -> None:
self.store_event(
data={
"user": {
"id": None,
"email": None,
"username": None,
"ip_address": "8.8.8.1",
},
"timestamp": before_now(seconds=30).isoformat(),
},
project_id=self.project.id,
)
euser = EventUser.for_projects([self.project], {"ip": ["8.8.8.1"]}, result_limit=1)
assert len(euser) == 1
assert euser[0].user_ident is None
assert euser[0].username is None
assert euser[0].email is None
assert euser[0].tag_value == "ip:8.8.8.1"
def test_for_tags(self) -> None:
assert EventUser.for_tags(self.project.id, ["id:myminion"]) == {
"id:myminion": EventUser.from_event(self.event_3)
}
assert EventUser.for_tags(self.project.id, ["id:doesnotexist"]) == {}
assert EventUser.for_tags(self.project.id, ["id:myminion", "id:doesnotexist", "id:2"]) == {
"id:myminion": EventUser.from_event(self.event_3),
"id:2": EventUser.from_event(self.event_2),
}
def test_for_tags_large_batch(self) -> None:
"""Test that for_tags handles large batches without query size errors."""
# Create a large list of tag values that would normally cause query size issues
tag_values = []
# Add the existing known values first
tag_values.extend(["id:myminion", "id:2"])
# Add many non-existent values to simulate a large batch
for i in range(MAX_TAG_VALUES_BATCH_SIZE + 1): # More than MAX_TAG_VALUES_BATCH_SIZE (100)
tag_values.append(f"id:nonexistent{i}")
# This should work without raising SnubaError about query size
result = EventUser.for_tags(self.project.id, tag_values)
# We should get the existing users and empty results for non-existent ones
expected = {
"id:myminion": EventUser.from_event(self.event_3),
"id:2": EventUser.from_event(self.event_2),
}
assert result == expected
| EventUserTestCase |
python | Netflix__metaflow | metaflow/_vendor/yaml/scanner.py | {
"start": 906,
"end": 51277
} | class ____:
    def __init__(self):
        """Initialize scanner state and emit the initial STREAM-START token.

        It is assumed that Scanner and Reader have a common descendant:
        Reader does the dirty work of checking for a BOM, converting the
        input data to Unicode and appending a trailing NUL.  It provides:

            self.peek(i=0)     # peek the next i-th character
            self.prefix(l=1)   # peek the next l characters
            self.forward(l=1)  # read the next l characters, move the pointer
        """
        # Had we reached the end of the stream?
        self.done = False
        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0
        # List of processed tokens that are not yet emitted.
        self.tokens = []
        # Add the STREAM-START token.
        self.fetch_stream_start()
        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0
        # The current indentation level.
        self.indent = -1
        # Past indentation levels.
        self.indents = []
        # Variables related to simple keys treatment.
        # A simple key is a key that is not denoted by the '?' indicator, e.g.
        #   ---
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.
        # Can a simple key start at the current position?  A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #   (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True
        # Keep track of possible simple keys.  This is a dictionary.  The key
        # is `flow_level`; there can be no more than one possible simple key
        # for each level.  The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
# Return None if no more tokens.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
else:
return None
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
if self.done:
return False
if not self.tokens:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
    def fetch_more_tokens(self):
        """Scan the input and append the next token(s) to ``self.tokens``.

        Dispatches on the first character of the upcoming token.  The guard
        checks (stream end, directive, document markers) must come first;
        the order of the remaining indicator checks is not significant.
        """
        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()
        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)
        # Peek the next character.
        ch = self.peek()
        # Is it the end of stream?
        if ch == '\0':
            return self.fetch_stream_end()
        # Is it a directive?
        if ch == '%' and self.check_directive():
            return self.fetch_directive()
        # Is it the document start?
        if ch == '-' and self.check_document_start():
            return self.fetch_document_start()
        # Is it the document end?
        if ch == '.' and self.check_document_end():
            return self.fetch_document_end()
        # TODO: support for BOM within a stream.
        #if ch == '\uFEFF':
        #    return self.fetch_bom()     <-- issue BOMToken
        # Note: the order of the following checks is NOT significant.
        # Is it the flow sequence start indicator?
        if ch == '[':
            return self.fetch_flow_sequence_start()
        # Is it the flow mapping start indicator?
        if ch == '{':
            return self.fetch_flow_mapping_start()
        # Is it the flow sequence end indicator?
        if ch == ']':
            return self.fetch_flow_sequence_end()
        # Is it the flow mapping end indicator?
        if ch == '}':
            return self.fetch_flow_mapping_end()
        # Is it the flow entry indicator?
        if ch == ',':
            return self.fetch_flow_entry()
        # Is it the block entry indicator?
        if ch == '-' and self.check_block_entry():
            return self.fetch_block_entry()
        # Is it the key indicator?
        if ch == '?' and self.check_key():
            return self.fetch_key()
        # Is it the value indicator?
        if ch == ':' and self.check_value():
            return self.fetch_value()
        # Is it an alias?
        if ch == '*':
            return self.fetch_alias()
        # Is it an anchor?
        if ch == '&':
            return self.fetch_anchor()
        # Is it a tag?
        if ch == '!':
            return self.fetch_tag()
        # Is it a literal scalar?  Block scalars exist only in block context.
        if ch == '|' and not self.flow_level:
            return self.fetch_literal()
        # Is it a folded scalar?
        if ch == '>' and not self.flow_level:
            return self.fetch_folded()
        # Is it a single quoted scalar?
        if ch == '\'':
            return self.fetch_single()
        # Is it a double quoted scalar?
        if ch == '\"':
            return self.fetch_double()
        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()
        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token" % ch,
                self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
    def stale_possible_simple_keys(self):
        """Drop recorded simple-key candidates that can no longer be keys.

        According to the YAML specification, simple keys
        - should be limited to a single line,
        - should be no longer than 1024 characters.
        Disabling this procedure will allow simple keys of any length and
        height (may cause problems if indentation is broken though).

        Raises ScannerError when a *required* candidate goes stale.
        """
        # Iterate over a copy of the keys: entries are deleted in the loop.
        for level in list(self.possible_simple_keys):
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not find expected ':'", self.get_mark())
                del self.possible_simple_keys[level]
    def save_possible_simple_key(self):
        """Record that the token about to be fetched may start a simple key.

        Called for ALIAS, ANCHOR, TAG, SCALAR(flow), '[' and '{'.
        """
        # A simple key is *required* at the current position only in block
        # context, exactly at the current indentation column.
        required = not self.flow_level and self.indent == self.column
        # The next token might be a simple key.  Save its number and position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key
    def remove_possible_simple_key(self):
        """Discard the saved simple-key candidate at the current flow level.

        Raises ScannerError if the candidate was *required*, since no ':'
        was found for it.
        """
        if self.flow_level in self.possible_simple_keys:
            key = self.possible_simple_keys[self.flow_level]
            if key.required:
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not find expected ':'", self.get_mark())
            del self.possible_simple_keys[self.flow_level]
# Indentation functions.
    def unwind_indent(self, column):
        """Pop indentation levels deeper than *column*, emitting BLOCK-END
        tokens.  In flow context indentation is ignored entirely."""
        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid indentation or unclosed '[' or '{'",
        #            self.get_mark())

        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
        if self.flow_level:
            return
        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
def fetch_stream_start(self):
# We always add STREAM-START as the first token and STREAM-END as the
# last token.
# Read the token.
mark = self.get_mark()
# Add STREAM-START.
self.tokens.append(StreamStartToken(mark, mark,
encoding=self.encoding))
def fetch_stream_end(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
# Read the token.
mark = self.get_mark()
# Add STREAM-END.
self.tokens.append(StreamEndToken(mark, mark))
# The steam is finished.
self.done = True
def fetch_directive(self):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
# Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys. Note that there could not be a block collection
# after '---'.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
# '[' and '{' may start a simple key.
self.save_possible_simple_key()
# Increase the flow level.
self.flow_level += 1
# Simple keys are allowed after '[' and '{'.
self.allow_simple_key = True
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Decrease the flow level.
self.flow_level -= 1
# No simple keys after ']' or '}'.
self.allow_simple_key = False
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
    def fetch_block_entry(self):
        """Handle '-' starting a block sequence entry."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))
        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass
        # Simple keys are allowed after '-'.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))
    def fetch_key(self):
        """Handle '?' starting a (possibly complex) mapping key."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a key (not necessarily a simple one)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
    def fetch_value(self):
        """Handle ':' — either completing a saved simple key (inserting the
        KEY token retroactively) or starting a complex value."""
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY: inserted at the *saved* position, before tokens that
            # were queued after the key candidate.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))
            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))
            # There cannot be two simple keys one after another.
            self.allow_simple_key = False
        # It must be a part of a complex key.
        else:
            # Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
            # anyway.)
            if not self.flow_level:
                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())
            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))
            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level
            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()
        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '---' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == '...' \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
    def check_plain(self):
        """Return True if the current character can start a plain scalar."""
        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
                        and (ch == '-' or (not self.flow_level and ch in '?:')))
# Scanners.
    def scan_to_next_token(self):
        """Skip spaces, comments and line breaks up to the next token.

        Re-enables ``allow_simple_key`` when a line break is crossed in block
        context.  A BOM is stripped only at the very start of the stream.
        """
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.
        if self.index == 0 and self.peek() == '\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == ' ':
                self.forward()
            if self.peek() == '#':
                while self.peek() not in '\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
    def scan_directive(self):
        """Scan a '%YAML ...' or '%TAG ...' directive into a DirectiveToken.

        Unknown directives are skipped and produce a token with value None.
        """
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            # Skip the unknown directive's arguments up to the line end.
            while self.peek() not in '\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
    def scan_directive_name(self, start_mark):
        """Scan the alphanumeric directive name right after '%'.

        Raises ScannerError on an empty name or an illegal trailing character.
        """
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        # The name must be followed by whitespace or the end of the line.
        if ch not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        return value
    def scan_yaml_directive_value(self, start_mark):
        """Scan the '<major>.<minor>' version pair of a %YAML directive."""
        # See the specification for details.
        while self.peek() == ' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r" % self.peek(),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        # The version pair must be followed by whitespace or end of line.
        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r" % self.peek(),
                    self.get_mark())
        return (major, minor)
def scan_yaml_directive_number(self, start_mark):
# See the specification for details.
ch = self.peek()
if not ('0' <= ch <= '9'):
raise ScannerError("while scanning a directive", start_mark,
"expected a digit, but found %r" % ch, self.get_mark())
length = 0
while '0' <= self.peek(length) <= '9':
length += 1
value = int(self.prefix(length))
self.forward(length)
return value
def scan_tag_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while self.peek() == ' ':
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if ch != ' ':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
return value
def scan_directive_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in '\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a comment or a line break, but found %r"
% ch, self.get_mark())
self.scan_line_break()
    def scan_anchor(self, TokenClass):
        """Scan an '&anchor' or '*alias' name into a TokenClass token.

        The specification does not restrict characters for anchors and
        aliases.  This may lead to problems; for instance, the document:
            [ *alias, value ]
        can be interpreted in two ways, as
            [ "value" ]
        and
            [ *alias , "value" ]
        Therefore we restrict aliases to numbers and ASCII letters.
        """
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        length = 0
        ch = self.peek(length)
        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
                or ch in '-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        # The name must end before whitespace or a flow/structure indicator.
        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch, self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
# See the specification for details.
start_mark = self.get_mark()
ch = self.peek(1)
if ch == '<':
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if self.peek() != '>':
raise ScannerError("while parsing a tag", start_mark,
"expected '>', but found %r" % self.peek(),
self.get_mark())
self.forward()
elif ch in '\0 \t\r\n\x85\u2028\u2029':
handle = None
suffix = '!'
self.forward()
else:
length = 1
use_handle = False
while ch not in '\0 \r\n\x85\u2028\u2029':
if ch == '!':
use_handle = True
break
length += 1
ch = self.peek(length)
handle = '!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = '!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a tag", start_mark,
"expected ' ', but found %r" % ch, self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
    def scan_block_scalar(self, style):
        """Scan a literal ('|') or folded ('>') block scalar body.

        Handles the header (chomping/indentation indicators), determines the
        content indentation, applies folding and chomping rules, and returns
        a ScalarToken with plain=False.
        """
        # See the specification for details.
        if style == '>':
            folded = True
        else:
            folded = False
        chunks = []
        start_mark = self.get_mark()
        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)
        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # Auto-detect the indentation from the first non-empty line.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = ''
        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != '\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in ' \t'
            length = 0
            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != '\0':
                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:
                if folded and line_break == '\n'    \
                        and leading_non_space and self.peek() not in ' \t':
                    if not breaks:
                        chunks.append(' ')
                else:
                    chunks.append(line_break)
                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == '\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break
        # Chomp the tail: keep the final break unless strip ('-'),
        # keep trailing empty lines only if keep ('+').
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)
        # We are done.
        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
                style)
def scan_block_scalar_indicators(self, start_mark):
# See the specification for details.
chomping = None
increment = None
ch = self.peek()
if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
elif ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
ch = self.peek()
if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected chomping or indentation indicators, but found %r"
% ch, self.get_mark())
return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == ' ':
self.forward()
if self.peek() == '#':
while self.peek() not in '\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in '\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r" % ch,
self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in ' \r\n\x85\u2028\u2029':
if self.peek() != ' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
while self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == ' ':
self.forward()
return chunks, end_mark
def scan_flow_scalar(self, style):
# See the specification for details.
# Note that we loose indentation rules for quoted scalars. Quoted
# scalars don't need to adhere indentation because " and ' clearly
# mark the beginning and the end of them. Therefore we are less
# restrictive then the specification requires. We only need to check
# that document separators are not included in scalars.
if style == '"':
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while self.peek() != quote:
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(''.join(chunks), False, start_mark, end_mark,
style)
ESCAPE_REPLACEMENTS = {
'0': '\0',
'a': '\x07',
'b': '\x08',
't': '\x09',
'\t': '\x09',
'n': '\x0A',
'v': '\x0B',
'f': '\x0C',
'r': '\x0D',
'e': '\x1B',
' ': '\x20',
'\"': '\"',
'\\': '\\',
'/': '/',
'N': '\x85',
'_': '\xA0',
'L': '\u2028',
'P': '\u2029',
}
ESCAPE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
def scan_flow_scalar_non_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
length = 0
while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
length += 1
if length:
chunks.append(self.prefix(length))
self.forward(length)
ch = self.peek()
if not double and ch == '\'' and self.peek(1) == '\'':
chunks.append('\'')
self.forward(2)
elif (double and ch == '\'') or (not double and ch in '\"\\'):
chunks.append(ch)
self.forward()
elif double and ch == '\\':
self.forward()
ch = self.peek()
if ch in self.ESCAPE_REPLACEMENTS:
chunks.append(self.ESCAPE_REPLACEMENTS[ch])
self.forward()
elif ch in self.ESCAPE_CODES:
length = self.ESCAPE_CODES[ch]
self.forward()
for k in range(length):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"expected escape sequence of %d hexdecimal numbers, but found %r" %
(length, self.peek(k)), self.get_mark())
code = int(self.prefix(length), 16)
chunks.append(chr(code))
self.forward(length)
elif ch in '\r\n\x85\u2028\u2029':
self.scan_line_break()
chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
else:
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"found unknown escape character %r" % ch, self.get_mark())
else:
return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
length = 0
while self.peek(length) in ' \t':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch == '\0':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected end of stream", self.get_mark())
elif ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
breaks = self.scan_flow_scalar_breaks(double, start_mark)
if line_break != '\n':
chunks.append(line_break)
elif not breaks:
chunks.append(' ')
chunks.extend(breaks)
else:
chunks.append(whitespaces)
return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
# Instead of checking indentation, we check for document
# separators.
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document separator", self.get_mark())
while self.peek() in ' \t':
self.forward()
if self.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
def scan_plain(self):
# See the specification for details.
# We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',' or '?'.
# We also keep track of the `allow_simple_key` flag here.
# Indentation rules are loosed for the flow context.
chunks = []
start_mark = self.get_mark()
end_mark = start_mark
indent = self.indent+1
# We allow zero indentation for scalars, but then we need to check for
# document separators at the beginning of the line.
#if indent == 0:
# indent = 1
spaces = []
while True:
length = 0
if self.peek() == '#':
break
while True:
ch = self.peek(length)
if ch in '\0 \t\r\n\x85\u2028\u2029' \
or (ch == ':' and
self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+ (u',[]{}' if self.flow_level else u''))\
or (self.flow_level and ch in ',?[]{}'):
break
length += 1
if length == 0:
break
self.allow_simple_key = False
chunks.extend(spaces)
chunks.append(self.prefix(length))
self.forward(length)
end_mark = self.get_mark()
spaces = self.scan_plain_spaces(indent, start_mark)
if not spaces or self.peek() == '#' \
or (not self.flow_level and self.column < indent):
break
return ScalarToken(''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
# See the specification for details.
# The specification is really confusing about tabs in plain scalars.
# We just forbid them completely. Do not use tabs in YAML!
chunks = []
length = 0
while self.peek(length) in ' ':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
self.allow_simple_key = True
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return
breaks = []
while self.peek() in ' \r\n\x85\u2028\u2029':
if self.peek() == ' ':
self.forward()
else:
breaks.append(self.scan_line_break())
prefix = self.prefix(3)
if (prefix == '---' or prefix == '...') \
and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
return
if line_break != '\n':
chunks.append(line_break)
elif not breaks:
chunks.append(' ')
chunks.extend(breaks)
elif whitespaces:
chunks.append(whitespaces)
return chunks
def scan_tag_handle(self, name, start_mark):
# See the specification for details.
# For some strange reasons, the specification does not allow '_' in
# tag handles. I have allowed it anyway.
ch = self.peek()
if ch != '!':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch, self.get_mark())
length = 1
ch = self.peek(length)
if ch != ' ':
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_':
length += 1
ch = self.peek(length)
if ch != '!':
self.forward(length)
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch, self.get_mark())
length += 1
value = self.prefix(length)
self.forward(length)
return value
def scan_tag_uri(self, name, start_mark):
# See the specification for details.
# Note: we do not check if URI is well-formed.
chunks = []
length = 0
ch = self.peek(length)
while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.!~*\'()[]%':
if ch == '%':
chunks.append(self.prefix(length))
self.forward(length)
length = 0
chunks.append(self.scan_uri_escapes(name, start_mark))
else:
length += 1
ch = self.peek(length)
if length:
chunks.append(self.prefix(length))
self.forward(length)
length = 0
if not chunks:
raise ScannerError("while parsing a %s" % name, start_mark,
"expected URI, but found %r" % ch, self.get_mark())
return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
codes = []
mark = self.get_mark()
while self.peek() == '%':
self.forward()
for k in range(2):
if self.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected URI escape sequence of 2 hexdecimal numbers, but found %r"
% self.peek(k), self.get_mark())
codes.append(int(self.prefix(2), 16))
self.forward(2)
try:
value = bytes(codes).decode('utf-8')
except UnicodeDecodeError as exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in '\r\n\x85':
if self.prefix(2) == '\r\n':
self.forward(2)
else:
self.forward()
return '\n'
elif ch in '\u2028\u2029':
self.forward()
return ch
return ''
| Scanner |
python | prabhupant__python-ds | data_structures/bst/diameter.py | {
"start": 0,
"end": 841
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def height(root, ans):
if not root:
return 0
lheight = height(root.left, ans)
rheight = height(root.right, ans)
# Diameter is basically the max of (1 + lheight + rheight)
# So we are storing it here to reduce calling it again
# O(n)
ans[0] = max(ans[0], 1 + lheight + rheight)
return 1 + max(lheight, rheight)
def diameter(root):
if not root:
return 0
ans =[-99999999999]
h = height(root, ans)
return ans[0]
if __name__ == '__main__':
root = Node(5)
root.left = Node(3)
root.right = Node(7)
root.left.left = Node(2)
root.left.right = Node(4)
root.right.right = Node(8)
root.right.left = Node(6)
print(diameter(root))
| Node |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 100157,
"end": 101790
} | class ____(fixtures.TestBase):
__requires__ = ("cextensions",)
def test_all_cyext_imported(self):
ext = _all_cython_modules()
lib_folder = (Path(__file__).parent / ".." / ".." / "lib").resolve()
sa_folder = lib_folder / "sqlalchemy"
cython_files = [f.resolve() for f in sa_folder.glob("**/*_cy.py")]
eq_(len(ext), len(cython_files))
names = {
".".join(f.relative_to(lib_folder).parts).replace(".py", "")
for f in cython_files
}
eq_({m.__name__ for m in ext}, set(names))
@testing.combinations(*_all_cython_modules())
def test_load_uncompiled_module(self, module):
is_true(module._is_compiled())
py_module = langhelpers.load_uncompiled_module(module)
is_false(py_module._is_compiled())
eq_(py_module.__name__, module.__name__)
eq_(py_module.__package__, module.__package__)
def test_setup_defines_all_files(self):
try:
import setuptools # noqa: F401
except ImportError:
testing.skip_test("setuptools is required")
with (
mock.patch("setuptools.setup", mock.MagicMock()),
mock.patch.dict(
"os.environ",
{"DISABLE_SQLALCHEMY_CEXT": "", "REQUIRE_SQLALCHEMY_CEXT": ""},
),
):
import setup
setup_modules = {f"sqlalchemy.{m}" for m in setup.CYTHON_MODULES}
expected = {e.__name__ for e in _all_cython_modules()}
print(expected)
print(setup_modules)
eq_(setup_modules, expected)
| CyExtensionTest |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 9568,
"end": 10535
} | class ____(TabstopSnippetNode):
"""
Node that represents an int tabstop choice snippet.
This node represents the expression ${int:|options|}, where options are
text sequences separated by comma.
"""
KIND = SnippetKind.CHOICE
def __init__(self, number, *choices):
TabstopSnippetNode.__init__(self, number, choices[0])
self.current_choice = choices[0]
self.choices = choices
def update(self, choice):
if choice not in self.choices:
# TODO: Maybe we should display this as a warning
# instead of raising an exception.
raise LookupError('Choice {0} is not a valid value for this '
'snippet, expected any of {1}'.format(
choice, self.choices))
self.current_choice = choice
self._placeholder = choice
# --------------------- Variable snippet node classes -------------------------
| ChoiceNode |
python | walkccc__LeetCode | solutions/342. Power of Four/342.py | {
"start": 0,
"end": 415
} | class ____:
def isPowerOfFour(self, n: int) -> bool:
# Why (4^n - 1) % 3 == 0?
# (4^n - 1) = (2^n - 1)(2^n + 1) and 2^n - 1, 2^n, 2^n + 1 are
# three consecutive numbers; among one of them, there must be a multiple
# of 3, and that can't be 2^n, so it must be either 2^n - 1 or 2^n + 1.
# Therefore, 4^n - 1 is a multiple of 3.
return n > 0 and n.bit_count() == 1 and (n - 1) % 3 == 0
| Solution |
python | tiangolo__fastapi | fastapi/exceptions.py | {
"start": 4785,
"end": 5156
} | class ____(ValidationException):
def __init__(self, errors: Sequence[Any], *, body: Any = None) -> None:
super().__init__(errors)
self.body = body
def __str__(self) -> str:
message = f"{len(self._errors)} validation errors:\n"
for err in self._errors:
message += f" {err}\n"
return message
| ResponseValidationError |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 16304,
"end": 16356
} | class ____: pass
NotImplemented: Any = ...
| staticmethod |
python | pytorch__pytorch | torch/distributed/elastic/agent/server/api.py | {
"start": 16457,
"end": 38927
} | class ____(ElasticAgent):
"""An ``ElasticAgent`` that manages one particular type of worker role.
An ``ElasticAgent`` that manages workers (``WorkerGroup``) for a single ``WorkerSpec``
such as one particular type of worker role.
"""
def __init__(self, spec: WorkerSpec, exit_barrier_timeout: float = 300):
self._worker_group = WorkerGroup(spec)
self._remaining_restarts = self._worker_group.spec.max_restarts
self._store = None
self._exit_barrier_timeout = exit_barrier_timeout
self._total_execution_time = 0
def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup:
return self._worker_group
@abc.abstractmethod
def _start_workers(self, worker_group: WorkerGroup) -> dict[int, Any]:
r"""Start ``worker_group.spec.local_world_size`` number of workers.
This is according to worker spec for the worker group .
Returns a map of ``local_rank`` to worker ``id``.
"""
raise NotImplementedError
@abc.abstractmethod
def _stop_workers(self, worker_group: WorkerGroup) -> None:
r"""Stop all workers in the given worker group.
Implementers must deal with workers in all states defined by
``WorkerState``. That is, it must gracefully handle stopping
non-existent workers, unhealthy (stuck) workers, etc.
"""
raise NotImplementedError
@abc.abstractmethod
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
r"""Check on the workers for the ``worker_group``.
This function also returns the new state of the worker group.
"""
raise NotImplementedError
@abc.abstractmethod
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
"""Clean up any resources that were allocated during the agent's work.
Args:
death_sig: Signal to send to the child process, SIGTERM is default
"""
raise NotImplementedError
@prof
def _rendezvous(self, worker_group: WorkerGroup) -> None:
r"""Run rendezvous for the workers specified by the worker spec.
Assigns workers a new global rank and world size.
Updates the rendezvous store for the worker group.
"""
spec = worker_group.spec
with self.record_duration("RENDEZVOUS"):
rdzv_info = spec.rdzv_handler.next_rendezvous()
store = rdzv_info.store
group_rank = rdzv_info.rank
group_world_size = rdzv_info.world_size
# master_addr/master_port could be explicitly overridden
# TODO: BC - specific to static rdzv and can be simplified further
master_addr = spec.master_addr or rdzv_info.bootstrap_store_info.master_addr
master_port = spec.master_port or rdzv_info.bootstrap_store_info.master_port
self._store = store
with self.record_duration("ASSIGN_WORKER_RANKS"):
workers = self._assign_worker_ranks(
store, group_rank, group_world_size, spec
)
worker_group.workers = workers
worker_group.store = store
worker_group.group_rank = group_rank
worker_group.group_world_size = group_world_size
worker_group.master_addr = master_addr
worker_group.master_port = master_port
restart_count = spec.max_restarts - self._remaining_restarts
logger.info(
"[%(role)s] Rendezvous complete for workers. Result:\n"
" restart_count=%(restart_count)s\n"
" master_addr=%(master_addr)s\n"
" master_port=%(master_port)s\n"
" group_rank=%(group_rank)s\n"
" group_world_size=%(group_world_size)s\n"
" local_ranks=%(local_ranks)s\n"
" role_ranks=%(role_ranks)s\n"
" global_ranks=%(global_ranks)s\n"
" role_world_sizes=%(role_world_sizes)s\n"
" global_world_sizes=%(global_world_sizes)s\n"
" event_log_handler=%(event_log_handler)s\n",
{
"role": spec.role,
"restart_count": restart_count,
"master_addr": master_addr,
"master_port": master_port,
"group_rank": group_rank,
"group_world_size": group_world_size,
"local_ranks": [worker.local_rank for worker in workers],
"role_ranks": [worker.role_rank for worker in workers],
"global_ranks": [worker.global_rank for worker in workers],
"role_world_sizes": [worker.role_world_size for worker in workers],
"global_world_sizes": [worker.world_size for worker in workers],
"event_log_handler": spec.event_log_handler,
},
)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _assign_worker_ranks(
self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
) -> list[Worker]:
"""Determine proper ranks for worker processes.
Fast Path: when all workers have the same role and world size. We calculate
the global rank to be group_rank * group_world_size + local_rank. And the
`role_world_size` is the same as `global_world_size`. No TCP store is used in
this case. This is only enabled when users set the environment variable
`TORCH_ELASTIC_WORKER_IDENTICAL` to 1.
Time complexity: each worker O(1), overall O(1)
Slow Path: when workers have different roles and world sizes. We use the
the following algorithm:
1. Each agent writes its configuration(group_rank, group_world_size
, num_workers) to the common store.
2. The rank 0 agent reads all the role_info from the store and
determines each agents worker ranks.
3. Determine the global rank: the global rank of the workers is computed
by cumulative sum of the local_world_size for all workers in front of it.
For efficiency reasons each worker is assigned a base global rank
such that it's workers are in the range [base_global_rank,
base_global_rank + local_world_size).
4. Determine the role rank: The role rank is determined using the algorithms
in the point 3 with the exception that the ranks are calculated with
respect to the role name.
5. The rank 0 agent writes the assigned ranks to the store.
6. Each agent reads the assigned ranks from the store.
Time complexity: each worker O(1), rank0 O(n), overall O(n)
"""
if os.environ.get("TORCH_ELASTIC_WORKER_IDENTICAL", "0") == "1":
global_world_size = group_world_size * spec.local_world_size
base_global_rank = group_rank * spec.local_world_size
base_role_rank = base_global_rank
role_world_size = global_world_size
else:
ROLE_INFO_PREFIX = "torchelastic/role_info/"
ASSIGNED_RANKS_PREFIX = "torchelastic/assigned_ranks/"
agent_role_info = _RoleInstanceInfo(
spec.role, group_rank, spec.local_world_size
)
store.set(f"{ROLE_INFO_PREFIX}{group_rank}", agent_role_info.serialize())
# tcp store is collocated with rank 0 so we can use it to do extra compute to reduce overall # of operations.
if group_rank == 0:
role_infos_bytes = store.multi_get(
[f"torchelastic/role_info/{i}" for i in range(group_world_size)]
)
role_infos = [
_RoleInstanceInfo.deserialize(info_bytes)
for info_bytes in role_infos_bytes
]
role_sizes = defaultdict(lambda: 0)
global_size = 0
for role_info in role_infos:
role_sizes[role_info.role] += role_info.local_world_size
global_size += role_info.local_world_size
base_global_rank = 0
role_ranks = defaultdict(lambda: 0)
keys = []
values = []
for i, role_info in enumerate(role_infos):
keys.append(f"{ASSIGNED_RANKS_PREFIX}{i}")
values.append(
json.dumps(
[
base_global_rank,
global_size,
role_ranks[role_info.role],
role_sizes[role_info.role],
]
)
)
base_global_rank += role_info.local_world_size
role_ranks[role_info.role] += role_info.local_world_size
store.multi_set(keys, values)
# get will block until the data is available in the store.
(
base_global_rank,
global_world_size,
base_role_rank,
role_world_size,
) = json.loads(store.get(f"{ASSIGNED_RANKS_PREFIX}{group_rank}"))
workers = []
for local_rank in range(spec.local_world_size):
worker = Worker(
local_rank=local_rank,
global_rank=base_global_rank + local_rank,
role_rank=base_role_rank + local_rank,
world_size=global_world_size,
role_world_size=role_world_size,
)
workers.append(worker)
return workers
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _initialize_workers(self, worker_group: WorkerGroup) -> None:
r"""Start a fresh set of workers for the worker_group.
Essentially, a rendezvous followed by a ``start_workers``.
The caller should first call ``_stop_workers()`` to stop running workers
prior to calling this method.
Optimistically sets the state of the worker group that
just started as ``HEALTHY`` and delegates the actual monitoring
of state to ``_monitor_workers()`` method
"""
role = worker_group.spec.role
logger.info("[%s] Rendezvous'ing worker group", role)
# TODO after stopping workers, wait at least monitor_interval*2 for
# workers on different nodes to fail on a collective op before waiting
# on the rdzv barrier, this way we ensure that nodes enter rdzv
# at around the same time and reduce false positive rdzv timeout errors
self._rendezvous(worker_group)
logger.info("[%s] Starting worker group", role)
worker_ids = self._start_workers(worker_group)
for local_rank, w_id in worker_ids.items():
worker = worker_group.workers[local_rank]
worker.id = w_id
record(
self._construct_event("START", EventSource.WORKER, worker),
worker_group.spec.event_log_handler,
)
worker_group.state = WorkerState.HEALTHY
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _restart_workers(self, worker_group: WorkerGroup) -> None:
"""Restart (stops, rendezvous, starts) all local workers in the group."""
role = worker_group.spec.role
logger.info("[%s] Stopping worker group", role)
self._stop_workers(worker_group)
worker_group.state = WorkerState.STOPPED
self._initialize_workers(worker_group)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def run(self, role: str = DEFAULT_ROLE) -> RunResult:
start_time = time.monotonic()
shutdown_called: bool = False
try:
result = self._invoke_run(role)
self._total_execution_time = int(time.monotonic() - start_time)
self._record_metrics(result)
self._record_worker_events(result)
return result
except RendezvousGracefulExitError as e:
logger.info("Rendezvous gracefully exited: %s", e) # noqa: G200
except SignalException as e:
logger.warning("Received %s death signal, shutting down workers", e.sigval)
self._shutdown(e.sigval)
shutdown_called = True
raise
finally:
if not shutdown_called:
self._shutdown()
# record the execution time in case there were any exceptions during run.
self._total_execution_time = int(time.monotonic() - start_time)
def get_event_failed(self) -> Event:
return self._construct_event(
state="FAILED",
source=EventSource.AGENT,
raw_error=traceback.format_exc(),
)
def get_event_succeeded(self) -> Event:
return self._construct_event(
state="SUCCEEDED",
source=EventSource.AGENT,
)
def _record_worker_events(self, result: RunResult) -> None:
for worker in self._worker_group.workers:
failure = result.failures.get(worker.global_rank)
state: str = self._get_worker_state(worker, result)
raw_error = json.dumps(failure.error_file_data) if failure else None
exit_code = failure.exitcode if failure else None
worker_pid = failure.pid if failure else None
record(
self._construct_event(
state=state,
source=EventSource.WORKER,
worker=worker,
raw_error=raw_error,
exit_code=exit_code,
worker_pid=worker_pid,
),
self._worker_group.spec.event_log_handler,
)
def _get_worker_state(self, worker: Worker, result: RunResult) -> str:
failure = result.failures.get(worker.global_rank)
if result.state in {WorkerState.UNHEALTHY, WorkerState.FAILED} and not failure:
# The worker got terminated by the torchelastic agent via SIGTERM signal
return "TERMINATED"
elif failure or worker.global_rank in result.return_values:
return result.state.value
else:
raise ValueError(f"Unknown worker: {worker.global_rank}")
@contextmanager
def record_duration(self, state: str):
start_time = time.perf_counter()
try:
yield
finally:
end_time = time.perf_counter()
duration_ms = (end_time - start_time) * 1000
record(
self._construct_event(
state=state, source=EventSource.AGENT, duration_ms=duration_ms
),
self._worker_group.spec.event_log_handler,
)
def _construct_event(
self,
state: str,
source: EventSource,
worker: Worker | None = None,
raw_error: str | None = None,
duration_ms: float | None = None,
exit_code: int | None = None,
worker_pid: int | None = None,
) -> Event:
wg = self._worker_group
spec = wg.spec
md = {
"group_world_size": wg.group_world_size,
"entry_point": spec.get_entrypoint_name(),
}
if worker:
md["local_rank"] = (worker.local_rank,)
md["role_rank"] = (worker.role_rank,)
md["role_world_size"] = (worker.role_world_size,)
md["exit_code"] = (exit_code,)
md["worker_pid"] = (worker_pid,)
global_rank = worker.global_rank
worker_id = str(worker.id)
else:
global_rank = None
worker_id = None
md_str = json.dumps(md)
metadata = {
"run_id": spec.rdzv_handler.get_run_id(),
"global_rank": global_rank,
"group_rank": wg.group_rank,
"worker_id": worker_id,
"role": spec.role,
"hostname": _get_fq_hostname(),
"state": state,
"total_run_time": self._total_execution_time,
"rdzv_backend": spec.rdzv_handler.get_backend(),
"raw_error": raw_error,
"metadata": md_str,
"agent_restarts": spec.max_restarts - self._remaining_restarts,
"duration_ms": duration_ms,
}
return Event(
f"torchelastic.worker.status.{state}", source=source, metadata=metadata
)
def _record_metrics(self, group_results: RunResult):
is_failed = group_results.is_failed()
self._record_flakiness_metric(is_failed)
spec = self._worker_group.spec
restarts_happened = self._remaining_restarts != spec.max_restarts
put_metric(f"workers.{spec.role}.run_total", 1)
self._record_metric_with_condition(
"run_success_with_retries", not is_failed and restarts_happened
)
self._record_metric_with_condition(
"run_success_no_retries", not is_failed and not restarts_happened
)
self._record_metric_with_condition(
"run_failed_with_retries", is_failed and restarts_happened
)
self._record_metric_with_condition(
"run_failed_no_retries", is_failed and not restarts_happened
)
def _record_metric_with_condition(self, metric_name, condition):
spec = self._worker_group.spec
if condition:
put_metric(f"workers.{spec.role}.{metric_name}", 1)
else:
put_metric(f"workers.{spec.role}.{metric_name}", 0)
def _record_flakiness_metric(self, is_failed: bool = False):
if is_failed:
flakiness = 100.0
else:
spec = self._worker_group.spec
flakiness = 100.0 - 100.0 * (self._remaining_restarts + 1) / (
spec.max_restarts + 1
)
spec = self._worker_group.spec
put_metric(f"workers.{spec.role}.flakiness", int(flakiness))
def _invoke_run(self, role: str = DEFAULT_ROLE) -> RunResult:
# NOTE: currently only works for a single role
spec = self._worker_group.spec
role = spec.role
logger.info(
"[%s] starting workers for entrypoint: %s", role, spec.get_entrypoint_name()
)
self._initialize_workers(self._worker_group)
monitor_interval = spec.monitor_interval
rdzv_handler = spec.rdzv_handler
while True:
assert self._worker_group.state != WorkerState.INIT
time.sleep(monitor_interval)
run_result = self._monitor_workers(self._worker_group)
state = run_result.state
self._worker_group.state = state
put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
put_metric(f"workers.{role}.{state.name.lower()}", 1)
if state == WorkerState.SUCCEEDED:
logger.info(
"[%s] worker group successfully finished."
" Waiting %s seconds for other agents to finish.",
role,
self._exit_barrier_timeout,
)
self._exit_barrier()
return run_result
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED}:
if self._remaining_restarts > 0:
logger.info(
"[%s] Worker group %s. "
"%s/%s attempts left;"
" will restart worker group",
role,
state.name,
self._remaining_restarts,
spec.max_restarts,
)
self._remaining_restarts -= 1
self._restart_workers(self._worker_group)
else:
self._stop_workers(self._worker_group)
self._worker_group.state = WorkerState.FAILED
return run_result
elif state == WorkerState.HEALTHY:
# membership changes do not count as retries
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
group_rank = self._worker_group.group_rank
if num_nodes_waiting > 0:
logger.info(
"[%s] Detected %s "
"new nodes from group_rank=%s; "
"will restart worker group",
role,
num_nodes_waiting,
group_rank,
)
self._restart_workers(self._worker_group)
else:
raise Exception( # noqa: TRY002
f"[{role}] Worker group in {state.name} state"
)
def _exit_barrier(self):
"""
Define a barrier that keeps the agent process alive until all workers finish.
Wait for ``exit_barrier_timeout`` seconds for all agents to finish
executing their local workers (either successfully or not). This
acts as a safety guard against user scripts that terminate at different
times.
"""
logger.info(
"Local worker group finished (%s). "
"Waiting %s seconds for other agents to finish",
self._worker_group.state,
self._exit_barrier_timeout,
)
start = time.time()
try:
store_util.barrier(
store=self._store,
world_size=self._worker_group.group_world_size,
key_prefix=_TERMINAL_STATE_SYNC_ID,
barrier_timeout=self._exit_barrier_timeout,
)
logger.info(
"Done waiting for other agents. Elapsed: %s seconds",
time.time() - start,
)
except SignalException as e:
logger.warning("Got termination signal: %s", e.sigval)
raise
except Exception:
logger.exception(
"Error waiting on exit barrier. Elapsed: %s seconds",
time.time() - start,
)
| SimpleElasticAgent |
python | pytorch__pytorch | torch/distributed/tensor/_ops/_math_ops.py | {
"start": 1113,
"end": 1239
} | class ____:
norm_type: int | float | str
ReductionOpType = Union[NormReduction, str]
@dataclass(frozen=True)
| NormReduction |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol21.py | {
"start": 314,
"end": 606
} | class ____:
name: str
def do_something(a: A, class_a: type[A]) -> None:
val1 = a.name
reveal_type(val1, expected_text="str")
# This should generate an error because accesses to
# properties from a protocol class are not allowed.
val2 = class_a.name
val3: A = B()
| B |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1024033,
"end": 1024839
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseMembersCanUpdateProtectedBranchesSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated members can update protected
branches setting.
"""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the members can update
protected branches setting.
"""
| UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingPayload |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/art3d.py | {
"start": 28882,
"end": 39160
} | class ____(PathCollection):
"""
A collection of 3D paths.
"""
def __init__(
self,
*args,
zs=0,
zdir="z",
depthshade=None,
depthshade_minalpha=None,
axlim_clip=False,
**kwargs
):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument *depthshade* is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
*depthshade_minalpha* sets the minimum alpha value applied by
depth-shading.
"""
if depthshade is None:
depthshade = rcParams['axes3d.depthshade']
if depthshade_minalpha is None:
depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']
self._depthshade = depthshade
self._depthshade_minalpha = depthshade_minalpha
self._in_draw = False
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir, axlim_clip)
self._offset_zordered = None
def draw(self, renderer):
with self._use_zordered_offset():
with cbook._setattr_cm(self, _in_draw=True):
super().draw(renderer)
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir, axlim_clip=False):
"""
Set the *z* positions and direction of the paths.
Parameters
----------
zs : float or array of floats
The location or locations to place the paths in the collection
along the *zdir* axis.
zdir : {'x', 'y', 'z'}
Plane to plot paths orthogonal to.
All paths must have the same direction.
See `.get_dir_vector` for a description of the values.
axlim_clip : bool, default: False
Whether to hide paths with a vertex outside the axes view limits.
.. versionadded:: 3.10
"""
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = offsets.T
else:
xs = []
ys = []
self._zdir = zdir
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
# In the base draw methods we access the attributes directly which
# means we cannot resolve the shuffling in the getter methods like
# we do for the edge and face colors.
#
# This means we need to carry around a cache of the unsorted sizes and
# widths (postfixed with 3d) and in `do_3d_projection` set the
# depth-sorted version of that data into the private state used by the
# base collection class in its draw method.
#
# Grab the current sizes and linewidths to preserve them.
self._sizes3d = self._sizes
self._linewidths3d = np.array(self._linewidths)
xs, ys, zs = self._offsets3d
# Sort the points based on z coordinates
# Performance optimization: Create a sorted index array and reorder
# points and point properties according to the index array
self._z_markers_idx = slice(-1)
self._vzs = None
self._axlim_clip = axlim_clip
self.stale = True
def set_sizes(self, sizes, dpi=72.0):
super().set_sizes(sizes, dpi)
if not self._in_draw:
self._sizes3d = sizes
def set_linewidth(self, lw):
super().set_linewidth(lw)
if not self._in_draw:
self._linewidths3d = np.array(self._linewidths)
def get_depthshade(self):
return self._depthshade
def set_depthshade(
self,
depthshade,
depthshade_minalpha=None,
):
"""
Set whether depth shading is performed on collection members.
Parameters
----------
depthshade : bool
Whether to shade the patches in order to give the appearance of
depth.
depthshade_minalpha : float
Sets the minimum alpha value used by depth-shading.
.. versionadded:: 3.11
"""
if depthshade_minalpha is None:
depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']
self._depthshade = depthshade
self._depthshade_minalpha = depthshade_minalpha
self.stale = True
def do_3d_projection(self):
mask = False
for xyz in self._offsets3d:
if np.ma.isMA(xyz):
mask = mask | xyz.mask
if self._axlim_clip:
mask = mask | _viewlim_mask(*self._offsets3d, self.axes)
mask = np.broadcast_to(mask,
(len(self._offsets3d), *self._offsets3d[0].shape))
xyzs = np.ma.array(self._offsets3d, mask=mask)
else:
xyzs = self._offsets3d
vxs, vys, vzs, vis = proj3d._proj_transform_clip(*xyzs,
self.axes.M,
self.axes._focal_length)
self._data_scale = _get_data_scale(vxs, vys, vzs)
# Sort the points based on z coordinates
# Performance optimization: Create a sorted index array and reorder
# points and point properties according to the index array
z_markers_idx = self._z_markers_idx = np.ma.argsort(vzs)[::-1]
self._vzs = vzs
# we have to special case the sizes because of code in collections.py
# as the draw method does
# self.set_sizes(self._sizes, self.figure.dpi)
# so we cannot rely on doing the sorting on the way out via get_*
if len(self._sizes3d) > 1:
self._sizes = self._sizes3d[z_markers_idx]
if len(self._linewidths3d) > 1:
self._linewidths = self._linewidths3d[z_markers_idx]
PathCollection.set_offsets(self, np.ma.column_stack((vxs, vys)))
# Re-order items
vzs = vzs[z_markers_idx]
vxs = vxs[z_markers_idx]
vys = vys[z_markers_idx]
# Store ordered offset for drawing purpose
self._offset_zordered = np.ma.column_stack((vxs, vys))
return np.min(vzs) if vzs.size else np.nan
@contextmanager
def _use_zordered_offset(self):
if self._offset_zordered is None:
# Do nothing
yield
else:
# Swap offset with z-ordered offset
old_offset = self._offsets
super().set_offsets(self._offset_zordered)
try:
yield
finally:
self._offsets = old_offset
def _maybe_depth_shade_and_sort_colors(self, color_array):
# Adjust the color_array alpha values if point depths are defined
# and depth shading is active
if self._vzs is not None and self._depthshade:
color_array = _zalpha(
color_array,
self._vzs,
min_alpha=self._depthshade_minalpha,
_data_scale=self._data_scale,
)
# Adjust the order of the color_array using the _z_markers_idx,
# which has been sorted by z-depth
if len(color_array) > 1:
color_array = color_array[self._z_markers_idx]
return mcolors.to_rgba_array(color_array)
def get_facecolor(self):
return self._maybe_depth_shade_and_sort_colors(super().get_facecolor())
def get_edgecolor(self):
# We need this check here to make sure we do not double-apply the depth
# based alpha shading when the edge color is "face" which means the
# edge colour should be identical to the face colour.
if cbook._str_equal(self._edgecolors, 'face'):
return self.get_facecolor()
return self._maybe_depth_shade_and_sort_colors(super().get_edgecolor())
def patch_collection_2d_to_3d(
col,
zs=0,
zdir="z",
depthshade=None,
axlim_clip=False,
*args,
depthshade_minalpha=None,
):
"""
Convert a `.PatchCollection` into a `.Patch3DCollection` object
(or a `.PathCollection` into a `.Path3DCollection` object).
Parameters
----------
col : `~matplotlib.collections.PatchCollection` or \
`~matplotlib.collections.PathCollection`
The collection to convert.
zs : float or array of floats
The location or locations to place the patches in the collection along
the *zdir* axis. Default: 0.
zdir : {'x', 'y', 'z'}
The axis in which to place the patches. Default: "z".
See `.get_dir_vector` for a description of the values.
depthshade : bool, default: :rc:`axes3d.depthshade`
Whether to shade the patches to give a sense of depth.
axlim_clip : bool, default: False
Whether to hide patches with a vertex outside the axes view limits.
.. versionadded:: 3.10
depthshade_minalpha : float, default: :rc:`axes3d.depthshade_minalpha`
Sets the minimum alpha value used by depth-shading.
.. versionadded:: 3.11
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
col._offset_zordered = None
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
if depthshade is None:
depthshade = rcParams['axes3d.depthshade']
if depthshade_minalpha is None:
depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']
col._depthshade = depthshade
col._depthshade_minalpha = depthshade_minalpha
col._in_draw = False
col.set_3d_properties(zs, zdir, axlim_clip)
| Path3DCollection |
python | pandas-dev__pandas | pandas/tests/io/formats/test_format.py | {
"start": 1995,
"end": 48803
} | class ____:
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.default_rng(2).standard_normal(10),
"B": [
"a"
* np.random.default_rng(2).integers(max_len - 1, max_len + 1)
for _ in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = printing.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_truncation_preserves_na(self):
# https://github.com/pandas-dev/pandas/issues/55630
df = DataFrame({"a": [pd.NA for _ in range(10)]})
with option_context("display.max_rows", 2, "display.show_dimensions", False):
assert repr(df) == " a\n0 <NA>\n.. ...\n9 <NA>"
def test_repr_truncation_dataframe_attrs(self):
# GH#60455
df = DataFrame([[0] * 10])
df.attrs["b"] = DataFrame([])
with option_context("display.max_columns", 2, "display.show_dimensions", False):
assert repr(df) == " 0 ... 9\n0 0 ... 0"
def test_repr_truncation_series_with_dataframe_attrs(self):
# GH#60568
ser = Series([0] * 10)
ser.attrs["b"] = DataFrame([])
with option_context("display.max_rows", 2, "display.show_dimensions", False):
assert repr(ser) == "0 0\n ..\n9 0\ndtype: int64"
def test_max_colwidth_negative_int_raises(self):
# Deprecation enforced from:
# https://github.com/pandas-dev/pandas/issues/31532
with pytest.raises(
ValueError, match="Value must be a nonnegative integer or None"
):
with option_context("display.max_colwidth", -1):
pass
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.default_rng(2).random((1, 7)))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
@pytest.mark.parametrize(
"data, format_option, expected_values",
[
(12345.6789, "{:12.3f}", "12345.679"),
(None, "{:.3f}", "None"),
("", "{:.2f}", ""),
(112345.6789, "{:6.3f}", "112345.679"),
("foo foo", None, "foo foo"),
(" foo", None, "foo"),
(
"foo foo foo",
None,
"foo foo foo",
), # odd no.of spaces
(
"foo foo foo",
None,
"foo foo foo",
), # even no.of spaces
],
)
def test_repr_float_formatting_html_output(
self, data, format_option, expected_values
):
if format_option is not None:
with option_context("display.float_format", format_option.format):
df = DataFrame({"A": [data]})
html_output = df._repr_html_()
assert expected_values in html_output
else:
df = DataFrame({"A": [data]})
html_output = df._repr_html_()
assert expected_values in html_output
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode2(self):
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{
"b": ["あ", "いいい", "う", "ええええええ"],
"あああああ": [1, 222, 33333, 4],
},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{
"あああ": ["あああ", "い", "う", "えええええ"],
"いいいいい": ["あ", "いいい", "う", "ええ"],
},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{
"b": ["あ", "いいい", "う", "ええええええ"],
"あああああ": [1, 222, 33333, 4],
},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{
"あああ": ["あああ", "い", "う", "えええええ"],
"いいいいい": ["あ", "いいい", "う", "ええ"],
},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{
"b": ["あ", "いいい", "¡¡", "ええええええ"],
"あああああ": [1, 222, 33333, 4],
},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
@pytest.mark.parametrize(
"index_scalar",
[
"a" * 10,
1,
Timestamp(2020, 1, 1),
pd.Period("2020-01-01"),
],
)
@pytest.mark.parametrize("h", [10, 20])
@pytest.mark.parametrize("w", [10, 20])
def test_to_string_truncate_indices(self, index_scalar, h, w):
with option_context("display.expand_frame_repr", False):
df = DataFrame(
index=[index_scalar] * h, columns=[str(i) * 10 for i in range(w)]
)
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not has_horizontally_truncated_repr(df)
with option_context("display.max_rows", 15, "display.max_columns", 15):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
@pytest.mark.parametrize("dtype", ["object", "datetime64[us]"])
def test_truncate_with_different_dtypes(self, dtype):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
ser = Series(
[datetime(2012, 1, 1)] * 10
+ [datetime(1012, 1, 2)]
+ [datetime(2012, 1, 3)] * 10,
dtype=dtype,
)
with option_context("display.max_rows", 8):
result = str(ser)
assert dtype in result
def test_truncate_with_different_dtypes2(self):
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9}, dtype=object)
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
with option_context("display.max_rows", 1):
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
with option_context("display.expand_frame_repr", False):
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
with option_context("display.expand_frame_repr", True):
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 3)),
columns=["a" * 90, "b" * 90, "c" * 90],
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
df.index.name = "DataFrame Index"
with option_context("display.expand_frame_repr", False):
rep_str = repr(df)
with option_context("display.expand_frame_repr", True):
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
max_cols = get_option("display.max_columns")
df = DataFrame([["a" * 25] * (max_cols - 1)] * 10, index=midx)
df.index.names = ["Level 0", "Level 1"]
with option_context("display.expand_frame_repr", False):
rep_str = repr(df)
with option_context("display.expand_frame_repr", True):
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
mcols = MultiIndex.from_arrays([["b" * 3] * (max_cols - 1)] * 2)
df = DataFrame(
[["c" * 25] * (max_cols - 1)] * 10, index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
with option_context("display.expand_frame_repr", False):
rep_str = repr(df)
with option_context("display.expand_frame_repr", True):
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame([["a" * 25] * 10] * (max_cols - 1))
with option_context("display.expand_frame_repr", False):
rep_str = repr(df)
with option_context("display.expand_frame_repr", True):
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.default_rng(2).integers(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# FIXME: don't leave commented-out
# test verbose overrides
# set_option('display.max_info_columns', 4) # exceeded
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected, using_infer_string):
# GH#38708
series = Series(data, dtype=object if "3.50" in data else None)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="h"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.default_rng(2).random((length, 3))),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
| TestDataFrameFormatting |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 115823,
"end": 116244
} | class ____:
xlSourceAutoFilter = 3 # from enum XlSourceType
xlSourceChart = 5 # from enum XlSourceType
xlSourcePivotTable = 6 # from enum XlSourceType
xlSourcePrintArea = 2 # from enum XlSourceType
xlSourceQuery = 7 # from enum XlSourceType
xlSourceRange = 4 # from enum XlSourceType
xlSourceSheet = 1 # from enum XlSourceType
xlSourceWorkbook = 0 # from enum XlSourceType
| SourceType |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 1053,
"end": 1179
} | class ____(str, Enum):
OPEN_URL = "Action.OpenUrl"
SUBMIT = "Action.Submit"
SHOW_CARD = "Action.ShowCard"
| ActionType |
python | getsentry__sentry | tests/sentry/integrations/msteams/test_link_identity.py | {
"start": 380,
"end": 5582
} | class ____(TestCase):
def setUp(self) -> None:
super(TestCase, self).setUp()
self.user1 = self.create_user(is_superuser=False)
self.user2 = self.create_user(is_superuser=False)
self.org = self.create_organization(owner=None)
self.create_member(user=self.user1, organization=self.org)
self.create_member(user=self.user2, organization=self.org)
self.team = self.create_team(organization=self.org, members=[self.user1, self.user2])
self.login_as(self.user1)
self.integration = self.create_provider_integration(
provider="msteams",
name="Hogwarts",
external_id="1_50l3mnly_5w34r",
metadata={
"service_url": "https://smba.trafficmanager.net/amer",
"access_token": "3ld3rw4nd",
"expires_at": int(time.time()) + 86400,
},
)
self.create_organization_integration(
organization_id=self.org.id, integration=self.integration
)
self.idp = self.create_identity_provider(type="msteams", external_id="1_50l3mnly_5w34r")
@responses.activate
@patch("sentry.integrations.messaging.linkage.unsign")
def test_basic_flow(self, unsign: MagicMock) -> None:
unsign.return_value = {
"integration_id": self.integration.id,
"organization_id": self.org.id,
"teams_user_id": "a_p_w_b_d",
"team_id": "1_50l3mnly_5w34r",
"tenant_id": "h0g5m34d3",
}
linking_url = build_linking_url(
self.integration,
self.org,
"a_p_w_b_d",
"1_50l3mnly_5w34r",
"h0g5m34d3",
)
resp = self.client.get(linking_url)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/auth-link-identity.html")
def user_conversation_id_callback(
request: PreparedRequest,
) -> tuple[int, dict[str, str], str]:
assert request.body is not None
payload = orjson.loads(request.body)
if payload["members"] == [{"id": "a_p_w_b_d"}] and payload["channelData"] == {
"tenant": {"id": "h0g5m34d3"}
}:
return 200, {}, orjson.dumps({"id": "dumbl3d0r3"}).decode()
raise Exception("Callback invariant violation")
responses.add_callback(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations",
callback=user_conversation_id_callback,
)
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/dumbl3d0r3/activities",
status=200,
json={},
)
resp = self.client.post(linking_url)
identity = Identity.objects.filter(external_id="a_p_w_b_d", user=self.user1)
assert len(identity) == 1
assert identity[0].idp == self.idp
assert identity[0].status == IdentityStatus.VALID
assert len(responses.calls) == 2
@responses.activate
@patch("sentry.integrations.messaging.linkage.unsign")
def test_overwrites_existing_identities(self, unsign: MagicMock) -> None:
Identity.objects.create(
user=self.user1, idp=self.idp, external_id="h_p", status=IdentityStatus.VALID
)
Identity.objects.create(
user=self.user2, idp=self.idp, external_id="g_w", status=IdentityStatus.VALID
)
unsign.return_value = {
"integration_id": self.integration.id,
"organization_id": self.org.id,
"teams_user_id": "g_w",
"team_id": "1_50l3mnly_5w34r",
"tenant_id": "th3_burr0w",
}
linking_url = build_linking_url(
self.integration,
self.org,
"g_w",
"1_50l3mnly_5w34r",
"th3_burr0w",
)
def user_conversation_id_callback(
request: PreparedRequest,
) -> tuple[int, dict[str, str], str]:
assert request.body is not None
payload = orjson.loads(request.body)
if payload["members"] == [{"id": "g_w"}] and payload["channelData"] == {
"tenant": {"id": "th3_burr0w"}
}:
return 200, {}, orjson.dumps({"id": "g1nny_w345l3y"}).decode()
return 404, {}, orjson.dumps({}).decode()
responses.add_callback(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations",
callback=user_conversation_id_callback,
)
responses.add(
method=responses.POST,
url="https://smba.trafficmanager.net/amer/v3/conversations/g1nny_w345l3y/activities",
status=200,
json={},
)
self.client.post(linking_url)
Identity.objects.get(external_id="g_w", user=self.user1)
assert not Identity.objects.filter(external_id="h_p", user=self.user1).exists()
assert not Identity.objects.filter(external_id="g_w", user=self.user2).exists()
| MsTeamsIntegrationLinkIdentityTest |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 9549,
"end": 9632
} | class ____(GeomOutputGeoFunc):
arity = 2
geom_param_pos = (0, 1)
| ClosestPoint |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 667,
"end": 743
} | class ____(sqltypes._Binary):
__visit_name__ = "RAW"
OracleRaw = RAW
| RAW |
python | doocs__leetcode | solution/3600-3699/3661.Maximum Walls Destroyed by Robots/Solution.py | {
"start": 0,
"end": 1007
} | class ____:
def maxWalls(self, robots: List[int], distance: List[int], walls: List[int]) -> int:
n = len(robots)
arr = sorted(zip(robots, distance), key=lambda x: x[0])
walls.sort()
@cache
def dfs(i: int, j: int) -> int:
if i < 0:
return 0
left = arr[i][0] - arr[i][1]
if i > 0:
left = max(left, arr[i - 1][0] + 1)
l = bisect_left(walls, left)
r = bisect_left(walls, arr[i][0] + 1)
ans = dfs(i - 1, 0) + r - l
right = arr[i][0] + arr[i][1]
if i + 1 < n:
if j == 0:
right = min(right, arr[i + 1][0] - arr[i + 1][1] - 1)
else:
right = min(right, arr[i + 1][0] - 1)
l = bisect_left(walls, arr[i][0])
r = bisect_left(walls, right + 1)
ans = max(ans, dfs(i - 1, 1) + r - l)
return ans
return dfs(n - 1, 1)
| Solution |
python | python-attrs__attrs | tests/test_validators.py | {
"start": 17468,
"end": 22320
} | class ____:
"""
Tests for `deep_mapping`.
"""
def test_in_all(self):
"""
Verify that this validator is in ``__all__``.
"""
assert deep_mapping.__name__ in validator_module.__all__
def test_success(self):
"""
If both the key and value validators succeed, nothing happens.
"""
key_validator = instance_of(str)
value_validator = instance_of(int)
v = deep_mapping(key_validator, value_validator)
a = simple_attr("test")
v(None, a, {"a": 6, "b": 7})
@pytest.mark.parametrize(
("key_validator", "value_validator", "mapping_validator"),
[
(42, instance_of(int), None),
(instance_of(str), 42, None),
(instance_of(str), instance_of(int), 42),
(42, 42, None),
(42, 42, 42),
(42, None, None),
(None, 42, None),
],
)
def test_noncallable_validators(
self, key_validator, value_validator, mapping_validator
):
"""
Raise `TypeError` if any validators are not callable.
"""
with pytest.raises(TypeError) as e:
deep_mapping(key_validator, value_validator, mapping_validator)
value = 42
message = (
f"must be callable (got {value} that is a {value.__class__})."
)
assert message in e.value.args[0]
assert value == e.value.args[1]
assert message in e.value.msg
assert value == e.value.value
def test_fail_invalid_mapping(self):
"""
Raise `TypeError` if mapping validator fails.
"""
key_validator = instance_of(str)
value_validator = instance_of(int)
mapping_validator = instance_of(dict)
v = deep_mapping(key_validator, value_validator, mapping_validator)
a = simple_attr("test")
with pytest.raises(TypeError):
v(None, a, None)
def test_fail_invalid_key(self):
"""
Raise key validator error if an invalid key is found.
"""
key_validator = instance_of(str)
value_validator = instance_of(int)
v = deep_mapping(key_validator, value_validator)
a = simple_attr("test")
with pytest.raises(TypeError):
v(None, a, {"a": 6, 42: 7})
def test_fail_invalid_member(self):
"""
Raise key validator error if an invalid member value is found.
"""
key_validator = instance_of(str)
value_validator = instance_of(int)
v = deep_mapping(key_validator, value_validator)
a = simple_attr("test")
with pytest.raises(TypeError):
v(None, a, {"a": "6", "b": 7})
def test_repr(self):
"""
Returned validator has a useful `__repr__`.
"""
key_validator = instance_of(str)
key_repr = "<instance_of validator for type <class 'str'>>"
value_validator = instance_of(int)
value_repr = "<instance_of validator for type <class 'int'>>"
v = deep_mapping(key_validator, value_validator)
expected_repr = (
"<deep_mapping validator for objects mapping "
f"{key_repr} to {value_repr}>"
)
assert expected_repr == repr(v)
def test_error_neither_validator_provided(self):
"""
Raise ValueError if neither key_validator nor value_validator is
provided.
"""
with pytest.raises(ValueError) as e:
deep_mapping()
assert (
"At least one of key_validator or value_validator must be provided"
== e.value.args[0]
)
def test_key_validator_can_be_none(self):
"""
The key validator can be None.
"""
v = deep_mapping(value_validator=instance_of(int))
a = simple_attr("test")
v(None, a, {"a": 6, "b": 7})
def test_value_validator_can_be_none(self):
"""
The value validator can be None.
"""
v = deep_mapping(key_validator=instance_of(str))
a = simple_attr("test")
v(None, a, {"a": 6, "b": 7})
@pytest.mark.parametrize("conv", [list, tuple])
def test_validators_iterables(self, conv):
"""
If iterables are passed as validators, they are combined with and_.
"""
key_validator = (instance_of(str), min_len(2))
value_validator = (instance_of(int), ge(10))
mapping_validator = (instance_of(dict), max_len(2))
v = deep_mapping(
conv(key_validator),
conv(value_validator),
conv(mapping_validator),
)
assert and_(*key_validator) == v.key_validator
assert and_(*value_validator) == v.value_validator
assert and_(*mapping_validator) == v.mapping_validator
| TestDeepMapping |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 31026,
"end": 31182
} | class ____(Structure):
_fields_ = (
("isym_flags", p_uint32),
# ('isym', p_uint8 * 3),
# ('flags', p_uint8),
)
| dylib_reference |
python | nedbat__coveragepy | tests/test_debug.py | {
"start": 862,
"end": 3874
} | class ____(CoverageTest):
"""Tests of debug.info_formatter."""
run_in_temp_dir = False
def test_info_formatter(self) -> None:
lines = list(
info_formatter(
[
("x", "hello there"),
("very long label", ["one element"]),
("fits on 1", (17, 23, 42, 76, 99)),
("regular", ["abc", "def", "ghi", "jkl"]),
("nothing", []),
]
)
)
expected = [
" x: hello there",
" very long label: one element",
" fits on 1: (17, 23, 42, 76, 99)",
" regular: abc",
" def",
" ghi",
" jkl",
" nothing: -none-",
]
assert expected == lines
def test_info_formatter_with_generator(self) -> None:
lines = list(info_formatter(("info%d" % i, i) for i in range(3)))
expected = [
" info0: 0",
" info1: 1",
" info2: 2",
]
assert expected == lines
def test_too_long_label(self) -> None:
with pytest.raises(AssertionError):
list(info_formatter([("this label is way too long and will not fit", 23)]))
@pytest.mark.parametrize(
"label, header",
[
("x", "-- x ---------------------------------------------------------"),
("hello there", "-- hello there -----------------------------------------------"),
],
)
def test_info_header(label: str, header: str) -> None:
assert header == info_header(label)
@pytest.mark.parametrize(
"id64, id16",
[
(0x1234, 0x1234),
(0x12340000, 0x1234),
(0xA5A55A5A, 0xFFFF),
(0x1234CBA956780FED, 0x8008),
],
)
def test_short_id(id64: int, id16: int) -> None:
assert id16 == short_id(id64)
@pytest.mark.parametrize(
"text, numchars, result",
[
("hello", 10, "'hello'"),
("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"),
],
)
def test_clipped_repr(text: str, numchars: int, result: str) -> None:
assert result == clipped_repr(text, numchars)
@pytest.mark.parametrize(
"text, filters, result",
[
("hello", [], "hello"),
("hello\n", [], "hello\n"),
("hello\nhello\n", [], "hello\nhello\n"),
("hello\nbye\n", [lambda x: "=" + x], "=hello\n=bye\n"),
(
"hello\nbye\n",
[lambda x: "=" + x, lambda x: x + "\ndone\n"],
"=hello\ndone\n=bye\ndone\n",
),
],
)
def test_filter_text(
text: str,
filters: Iterable[Callable[[str], str]],
result: str,
) -> None:
assert result == filter_text(text, filters)
| InfoFormatterTest |
python | spack__spack | lib/spack/spack/config.py | {
"start": 9020,
"end": 13913
} | class ____(ConfigScope):
"""This class represents a configuration scope in a single YAML file."""
def __init__(
self,
name: str,
path: str,
schema: YamlConfigDict,
*,
yaml_path: Optional[List[str]] = None,
writable: bool = True,
prefer_modify: bool = True,
) -> None:
"""Similar to ``ConfigScope`` but can be embedded in another schema.
Arguments:
schema (dict): jsonschema for the file to read
yaml_path (list): path in the schema where config data can be
found.
If the schema accepts the following yaml data, the yaml_path
would be ['outer', 'inner']
.. code-block:: yaml
outer:
inner:
config:
install_tree: $spack/opt/spack
"""
super().__init__(name)
self._raw_data: Optional[YamlConfigDict] = None
self.schema = schema
self.path = path
self.writable = writable
self.prefer_modify = prefer_modify
self.yaml_path = yaml_path or []
def get_section_filename(self, section) -> str:
return self.path
def get_section(self, section: str) -> Optional[YamlConfigDict]:
# read raw data from the file, which looks like:
# {
# 'config': {
# ... data ...
# },
# 'packages': {
# ... data ...
# },
# }
#
# To preserve overrides up to the section level (e.g. to override
# the "packages" section with the "::" syntax), data in self.sections
# looks like this:
# {
# 'config': {
# 'config': {
# ... data ...
# }
# },
# 'packages': {
# 'packages': {
# ... data ...
# }
# }
# }
# This bit ensures we have read the file and have
# the raw data in memory
if self._raw_data is None:
self._raw_data = read_config_file(self.path, self.schema)
if self._raw_data is None:
return None
# Here we know we have the raw data and ensure we
# populate the sections dictionary, which may be
# cleared by the clear() method
if not self.sections:
section_data = self._raw_data
for key in self.yaml_path:
if section_data is None:
return None
section_data = section_data[key]
for section_key, data in section_data.items():
self.sections[section_key] = {section_key: data}
return self.sections.get(section, None)
def _write_section(self, section: str) -> None:
if not self.writable:
raise spack.error.ConfigError(f"Cannot write to immutable scope {self}")
data_to_write: Optional[YamlConfigDict] = self._raw_data
# If there is no existing data, this section SingleFileScope has never
# been written to disk. We need to construct the portion of the data
# from the root of self._raw_data to the level at which the config
# sections are defined. That requires creating keys for every entry in
# self.yaml_path
if not data_to_write:
data_to_write = {}
# reverse because we construct it from the inside out
for key in reversed(self.yaml_path):
data_to_write = {key: data_to_write}
# data_update_pointer is a pointer to the part of data_to_write
# that we are currently updating.
# We start by traversing into the data to the point at which the
# config sections are defined. This means popping the keys from
# self.yaml_path
data_update_pointer = data_to_write
for key in self.yaml_path:
data_update_pointer = data_update_pointer[key]
# For each section, update the data at the level of our pointer
# with the data from the section
for key, data in self.sections.items():
data_update_pointer[key] = data[key]
validate(data_to_write, self.schema)
try:
parent = os.path.dirname(self.path)
filesystem.mkdirp(parent)
tmp = os.path.join(parent, f".{os.path.basename(self.path)}.tmp")
with open(tmp, "w", encoding="utf-8") as f:
syaml.dump_config(data_to_write, stream=f, default_flow_style=False)
filesystem.rename(tmp, self.path)
except (syaml.SpackYAMLError, OSError) as e:
raise ConfigFileError(f"cannot write to config file {str(e)}") from e
def __repr__(self) -> str:
return f"<SingleFileScope: {self.name}: {self.path}>"
| SingleFileScope |
python | cython__cython | tests/run/richcmp_str_equals.py | {
"start": 71,
"end": 473
} | class ____(object):
def __init__(self):
pass
def __eq__(self, other):
return plop()
def test_equals(x):
"""
>>> x = testobj()
>>> result = test_equals(x)
>>> isinstance(result, plop)
True
>>> test_equals('hihi')
False
>>> test_equals('coucou')
True
"""
eq = x == 'coucou' # not every str equals returns a bool ...
return eq
| testobj |
python | gevent__gevent | src/greentest/3.10/test_smtpd.py | {
"start": 31216,
"end": 31731
} | class ____(SMTPDChannelTest):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer((socket_helper.HOSTv6, 0), ('b', 0),
decode_data=True)
conn, addr = self.server.accept()
self.channel = smtpd.SMTPChannel(self.server, conn, addr,
decode_data=True)
| SMTPDChannelIPv6Test |
python | agronholm__apscheduler | src/apscheduler/eventbrokers/redis.py | {
"start": 551,
"end": 4806
} | class ____(BaseExternalEventBroker):
"""
An event broker that uses a Redis server to broadcast events.
Requires the redis_ library to be installed.
.. _redis: https://pypi.org/project/redis/
:param client_or_url: an asynchronous Redis client or a Redis URL
(```redis://...```)
:param channel: channel on which to send the messages
:param stop_check_interval: interval (in seconds) on which the channel listener
should check if it should stop (higher values mean slower reaction time but less
CPU use)
.. note:: The event broker will not manage the life cycle of any client instance
passed to it, so you need to close the client afterwards when you're done with
it.
"""
client_or_url: Redis | str = attrs.field(validator=instance_of((Redis, str)))
channel: str = attrs.field(kw_only=True, default="apscheduler")
stop_check_interval: float = attrs.field(kw_only=True, default=1)
_client: Redis = attrs.field(init=False)
_close_on_exit: bool = attrs.field(init=False, default=False)
_stopped: bool = attrs.field(init=False, default=True)
def __attrs_post_init__(self) -> None:
if isinstance(self.client_or_url, str):
pool = ConnectionPool.from_url(self.client_or_url)
self._client = Redis(connection_pool=pool)
self._close_on_exit = True
else:
self._client = self.client_or_url
def __repr__(self) -> str:
return create_repr(self, "client_or_url")
def _retry(self) -> tenacity.AsyncRetrying:
def after_attempt(retry_state: tenacity.RetryCallState) -> None:
self._logger.warning(
"%s: connection failure (attempt %d): %s",
self.__class__.__name__,
retry_state.attempt_number,
retry_state.outcome.exception(),
)
return tenacity.AsyncRetrying(
stop=self.retry_settings.stop,
wait=self.retry_settings.wait,
retry=tenacity.retry_if_exception_type(ConnectionError),
after=after_attempt,
sleep=anyio.sleep,
reraise=True,
)
async def _close_client(self) -> None:
with move_on_after(5, shield=True):
await self._client.aclose(close_connection_pool=True)
async def start(self, exit_stack: AsyncExitStack, logger: Logger) -> None:
# Close the client and its connection pool if this broker was created using
# .from_url()
if self._close_on_exit:
exit_stack.push_async_callback(self._close_client)
pubsub = await exit_stack.enter_async_context(self._client.pubsub())
await pubsub.subscribe(self.channel)
await super().start(exit_stack, logger)
self._stopped = False
exit_stack.callback(setattr, self, "_stopped", True)
self._task_group.start_soon(
self._listen_messages, pubsub, name="Redis subscriber"
)
async def _listen_messages(self, pubsub: PubSub) -> None:
while not self._stopped:
try:
async for attempt in self._retry():
with attempt:
msg = await pubsub.get_message(
ignore_subscribe_messages=True,
timeout=self.stop_check_interval,
)
if msg and isinstance(msg["data"], bytes):
event = self.reconstitute_event(msg["data"])
if event is not None:
await self.publish_local(event)
except Exception as exc:
# CancelledError is a subclass of Exception in Python 3.7
if not isinstance(exc, CancelledError):
self._logger.exception(
"%s listener crashed", self.__class__.__name__
)
await pubsub.aclose()
raise
async def publish(self, event: Event) -> None:
notification = self.generate_notification(event)
async for attempt in self._retry():
with attempt:
await self._client.publish(self.channel, notification)
| RedisEventBroker |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/hitl.py | {
"start": 3146,
"end": 3312
} | class ____(BaseModel):
"""Schema for a collection of Human-in-the-loop details."""
hitl_details: Iterable[HITLDetail]
total_entries: int
| HITLDetailCollection |
python | PrefectHQ__prefect | src/prefect/utilities/schema_tools/hydration.py | {
"start": 1900,
"end": 2041
} | class ____(Placeholder):
pass
def _remove_value(value: Any) -> TypeIs[RemoveValue]:
return isinstance(value, RemoveValue)
| RemoveValue |
python | pytorch__pytorch | torch/utils/_config_module.py | {
"start": 758,
"end": 9956
} | class ____(Generic[T]):
"""Represents a config with richer behaviour than just a default value.
::
i.e.
foo = Config(justknob="//foo:bar", default=False)
install_config_module(...)
This configs must be installed with install_config_module to be used
Precedence Order:
alias: If set, the directly use the value of the alias.
env_name_force: If set, this environment variable has precedence over
everything after this.
If multiple env variables are given, the precedence order is from
left to right.
user_override: If a user sets a value (i.e. foo.bar=True), that
has precedence over everything after this.
env_name_default: If set, this environment variable will override everything
after this.
If multiple env variables are given, the precedence order is from
left to right.
justknob: If this pytorch installation supports justknobs, that will
override defaults, but will not override the user_override precedence.
default: This value is the lowest precedence, and will be used if nothing is
set.
Environment Variables:
These are interpreted to be either "0" or "1" to represent true and false.
Arguments:
justknob: the name of the feature / JK. In OSS this is unused.
default: is the value to default this knob to in OSS.
alias: The alias config to read instead.
env_name_force: The environment variable, or list of, to read that is a FORCE
environment variable. I.e. it overrides everything except for alias.
env_name_default: The environment variable, or list of, to read that changes the
default behaviour. I.e. user overrides take preference.
"""
default: T | object
justknob: str | None = None
env_name_default: list[str] | None = None
env_name_force: list[str] | None = None
value_type: type | None = None
alias: str | None = None
def __post_init__(self) -> None:
self.env_name_default = _Config.string_or_list_of_string_to_list(
self.env_name_default
)
self.env_name_force = _Config.string_or_list_of_string_to_list(
self.env_name_force
)
if self.alias is not None:
if (
self.default is not _UNSET_SENTINEL
or self.justknob is not None
or self.env_name_default is not None
or self.env_name_force is not None
):
raise AssertionError(
"if alias is set, none of {default, justknob, \
env_name_default and env_name_force} can be set"
)
@staticmethod
def string_or_list_of_string_to_list(
val: str | list[str] | None,
) -> list[str] | None:
if val is None:
return None
if isinstance(val, str):
return [val]
if not isinstance(val, list):
raise AssertionError(f"val is not a list, got {type(val)}")
return val
# In runtime, we unbox the Config[T] to a T, but typechecker cannot see this,
# so in order to allow for this dynamic behavior to work correctly with
# typechecking we are going to lie to the typechecker that Config[T] returns
# a T.
if TYPE_CHECKING:
def Config(
default: T | object = _UNSET_SENTINEL,
justknob: str | None = None,
env_name_default: str | list[str] | None = None,
env_name_force: str | list[str] | None = None,
value_type: type | None = None,
alias: str | None = None,
) -> T: ...
else:
def Config(
default: T | object = _UNSET_SENTINEL,
justknob: str | None = None,
env_name_default: str | list[str] | None = None,
env_name_force: str | list[str] | None = None,
value_type: type | None = None,
alias: str | None = None,
) -> _Config[T]:
return _Config(
default=default,
justknob=justknob,
env_name_default=env_name_default,
env_name_force=env_name_force,
value_type=value_type,
alias=alias,
)
def _read_env_variable(name: str) -> bool | str | None:
value = os.environ.get(name)
if value == "1":
return True
if value == "0":
return False
return value
def install_config_module(module: ModuleType) -> None:
"""
Converts a module-level config into a `ConfigModule()`.
See _config_typing.pyi for instructions on how to get the converted module to typecheck.
"""
class ConfigModuleInstance(ConfigModule):
# __annotations__ is written to by Sphinx autodoc
_bypass_keys = set({"_is_dirty", "_hash_digest", "__annotations__"})
def visit(
source: ModuleType | type,
dest: ModuleType | SubConfigProxy,
prefix: str,
) -> None:
"""Walk the module structure and move everything to module._config"""
type_hints = inspect.get_annotations(source)
for key, value in list(source.__dict__.items()):
if (
key.startswith("__")
or isinstance(value, (ModuleType, FunctionType))
or (
hasattr(value, "__module__")
and (
value.__module__ == "typing"
or value.__module__.startswith("collections.abc")
)
)
# Handle from torch.utils._config_module import Config
or (isinstance(value, type) and issubclass(value, _Config))
):
continue
name = f"{prefix}{key}"
annotated_type = type_hints.get(key, None)
if isinstance(value, CONFIG_TYPES):
config[name] = _ConfigEntry(
_Config(default=value, value_type=annotated_type)
)
if dest is module:
delattr(module, key)
elif isinstance(value, _Config):
if annotated_type is not None and value.value_type is None:
value.value_type = annotated_type
config[name] = _ConfigEntry(value)
if dest is module:
delattr(module, key)
elif isinstance(value, type):
if value.__module__ != module.__name__:
raise AssertionError(
f"subconfig class {value} must be defined in module {module.__name__}"
)
# a subconfig with `class Blah:` syntax
proxy = SubConfigProxy(module, f"{name}.")
visit(value, proxy, f"{name}.")
if dest is module:
setattr(dest, key, proxy)
else:
dest.__dict__[key] = proxy
else:
raise AssertionError(f"Unhandled config {key}={value} ({type(value)})")
config: dict[str, _ConfigEntry] = {}
compile_ignored_keys = get_assignments_with_compile_ignored_comments(module)
visit(module, module, "")
module._config = config # type: ignore[attr-defined]
module._compile_ignored_keys = compile_ignored_keys # type: ignore[attr-defined]
module.__class__ = ConfigModuleInstance
module._is_dirty = True # type: ignore[attr-defined]
module._hash_digest = None # type: ignore[attr-defined]
COMPILE_IGNORED_MARKER = "@compile_ignored"
# Gets all the keys (i.e. assignments) with a @compile_ignored comment
def get_assignments_with_compile_ignored_comments(module: ModuleType) -> set[str]:
source_code = inspect.getsource(module)
assignments = set()
# Tokenize the source code to retrieve comments
tokens = tokenize.tokenize(io.BytesIO(source_code.encode("utf-8")).readline)
current_comment = "", -1
prev_name = ""
for token in tokens:
if token.type == tokenize.COMMENT:
prev_name = ""
maybe_current = token.string.strip()
if COMPILE_IGNORED_MARKER in maybe_current:
if current_comment != ("", -1):
raise AssertionError(f"unconsumed {COMPILE_IGNORED_MARKER}")
current_comment = maybe_current, token.start[0]
elif token.type == tokenize.NAME:
# Only accept the first name token, to handle if you have
# something like foo: Bar = ...
if not prev_name:
prev_name = token.string
elif token.type == tokenize.OP and token.string == "=":
# Check if the current assignment follows a comment
# with COMPILE_IGNORED_MARKER
if (
COMPILE_IGNORED_MARKER in current_comment[0]
and current_comment[1] == token.start[0] - 1
):
assignments.add(prev_name)
current_comment = "", -1 # reset
prev_name = ""
if current_comment != ("", -1):
raise AssertionError(f"unconsumed {COMPILE_IGNORED_MARKER}")
return assignments
@dataclass
| _Config |
python | apache__airflow | airflow-core/tests/unit/dags/test_dag_warnings.py | {
"start": 943,
"end": 1687
} | class ____(BaseOperator):
def __init__(self, *, parameter: str | None = None, deprecated_parameter: str | None = None, **kwargs):
super().__init__(**kwargs)
if deprecated_parameter:
warnings.warn("Deprecated Parameter", category=DeprecationWarning, stacklevel=2)
parameter = deprecated_parameter
self.parameter = parameter
def execute(self, context):
return None
def some_warning():
warnings.warn("Some Warning", category=UserWarning, stacklevel=1)
with DAG(DAG_ID, start_date=datetime(2024, 1, 1), schedule=None):
TestOperator(task_id="test-task", parameter="foo")
TestOperator(task_id="test-task-deprecated", deprecated_parameter="bar")
some_warning()
| TestOperator |
python | jina-ai__jina | jina/serve/runtimes/gateway/composite/__init__.py | {
"start": 160,
"end": 341
} | class ____(CompositeServer, BaseGateway):
"""
:class:`CompositeGateway` is a CompositeServer that can be loaded from YAML as any other Gateway
"""
pass
| CompositeGateway |
python | pytorch__pytorch | torch/utils/_traceback.py | {
"start": 6972,
"end": 10274
} | class ____:
__slots__ = ["tb", "skip"]
def __init__(self, tb, skip=0) -> None:
self.tb = tb
self.skip = skip
def cleanup(self) -> None:
self.tb = None
def summary(self):
import torch._C._profiler
if self.tb is None:
# TODO: Maybe indicate that the traceback was elided?
return traceback.StackSummary()
return _extract_symbolized_tb(
torch._C._profiler.symbolize_tracebacks([self.tb])[0], self.skip
)
def __getstate__(self):
return (
None,
{
"tb": None, # TB is not pickleable
"skip": self.skip,
},
)
@staticmethod
def extract(*, script=False, cpp=False, skip=0):
"""
Like traceback.extract_stack(), but faster (approximately 20x faster); it
is fast enough that you can unconditionally log stacks this way as part of
normal execution. It returns a torch._C._profiler.CapturedTraceback
object that must be formatted specially with format_captured_tb.
By default, this only reports Python backtraces (like extract_stack). You
can set the script/cpp kwargs to also turn on TorchScript/C++ trace
reporting.
"""
import torch._C._profiler
if script or cpp:
if skip != 0:
raise AssertionError("skip with script/cpp NYI")
return CapturedTraceback(
torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp),
# Elide extract() frame if we don't have script/cpp frames. If
# we do have those frames, it doesn't work so force zero.
0 if script or cpp else skip + 1,
)
def format(self):
"""
Formats a single torch._C._profiler.CapturedTraceback into a list of
strings equivalent to the output of traceback.format_list. Note that if
pass it CapturedTraceback with C++ traces, it is better not to use this
function and use the batch formatting API format_captured_tbs to amortize
the cost of symbolization
"""
return traceback.format_list(self.summary())
@staticmethod
def format_all(tbs):
"""
Bulk version of CapturedTraceback.format. Returns a list of list of strings.
"""
import torch._C._profiler
# Directly populate tracebacks that already have cached summaries
rs: list[list[str] | None] = []
delayed_idxs = []
for i, tb in enumerate(tbs):
if tb.tb is None:
rs.append([])
else:
rs.append(None)
delayed_idxs.append(i)
torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])
for i in delayed_idxs:
rs[i] = traceback.format_list(tbs[i].summary())
return rs
def _extract_symbolized_tb(tb, skip):
"""
Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of
pre-processed stack trace entries.
"""
stack = traceback.StackSummary()
for f in reversed(tb[skip:]):
stack.append(traceback.FrameSummary(f["filename"], f["line"], f["name"]))
return stack
| CapturedTraceback |
python | pypa__warehouse | tests/unit/manage/views/test_teams.py | {
"start": 6644,
"end": 19777
} | class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_manage_team_roles(
self,
db_request,
organization_service,
user_service,
):
team = TeamFactory.create()
db_request.POST = MultiDict()
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.manage_team_roles()
form = result["form"]
assert result == {
"team": team,
"roles": [],
"form": form,
}
@pytest.mark.usefixtures("_enable_organizations")
def test_create_team_role(
self,
db_request,
organization_service,
user_service,
monkeypatch,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
manager = UserFactory.create(username="manager")
member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
OrganizationRoleFactory.create(
organization=organization,
user=member,
role_name=OrganizationRoleType.Member,
)
db_request.method = "POST"
db_request.POST = MultiDict({"username": member.username})
db_request.user = owner
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
send_team_member_added_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
team_views,
"send_team_member_added_email",
send_team_member_added_email,
)
send_added_as_team_member_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
team_views,
"send_added_as_team_member_email",
send_added_as_team_member_email,
)
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.create_team_role()
roles = organization_service.get_team_roles(team.id)
assert len(roles) == 1
assert roles[0].team_id == team.id
assert roles[0].user_id == member.id
assert send_team_member_added_email.calls == [
pretend.call(
db_request,
{owner, manager},
user=member,
submitter=db_request.user,
organization_name=team.organization.name,
team_name=team.name,
)
]
assert send_added_as_team_member_email.calls == [
pretend.call(
db_request,
member,
submitter=db_request.user,
organization_name=team.organization.name,
team_name=team.name,
)
]
assert db_request.session.flash.calls == [
pretend.call(
f"Added the team {team.name!r} to {team.organization.name!r}",
queue="success",
)
]
assert isinstance(result, HTTPSeeOther)
@pytest.mark.usefixtures("_enable_organizations")
def test_create_team_role_duplicate_member(
self,
db_request,
organization_service,
user_service,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
manager = UserFactory.create(username="manager")
member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
OrganizationRoleFactory.create(
organization=organization,
user=member,
role_name=OrganizationRoleType.Member,
)
role = TeamRoleFactory.create(
team=team,
user=member,
role_name=TeamRoleType.Member,
)
db_request.method = "POST"
db_request.POST = MultiDict({"username": member.username})
db_request.user = owner
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.create_team_role()
form = result["form"]
assert organization_service.get_team_roles(team.id) == [role]
assert db_request.session.flash.calls == []
assert result == {
"team": team,
"roles": [role],
"form": form,
}
@pytest.mark.usefixtures("_enable_organizations")
def test_create_team_role_not_a_member(
self,
db_request,
organization_service,
user_service,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
manager = UserFactory.create(username="manager")
not_a_member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
db_request.method = "POST"
db_request.POST = MultiDict({"username": not_a_member.username})
db_request.user = owner
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.create_team_role()
form = result["form"]
assert result == {
"team": team,
"roles": [],
"form": form,
}
assert form.username.errors == ["Not a valid choice."]
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_team_role(
self,
db_request,
organization_service,
user_service,
monkeypatch,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
manager = UserFactory.create(username="manager")
member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
OrganizationRoleFactory.create(
organization=organization,
user=member,
role_name=OrganizationRoleType.Member,
)
role = TeamRoleFactory.create(
team=team,
user=member,
role_name=TeamRoleType.Member,
)
db_request.method = "POST"
db_request.POST = MultiDict({"role_id": role.id})
db_request.user = owner
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/")
send_team_member_removed_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
team_views,
"send_team_member_removed_email",
send_team_member_removed_email,
)
send_removed_as_team_member_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
team_views,
"send_removed_as_team_member_email",
send_removed_as_team_member_email,
)
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.delete_team_role()
assert organization_service.get_team_roles(team.id) == []
assert send_team_member_removed_email.calls == [
pretend.call(
db_request,
{owner, manager},
user=member,
submitter=db_request.user,
organization_name=team.organization.name,
team_name=team.name,
)
]
assert send_removed_as_team_member_email.calls == [
pretend.call(
db_request,
member,
submitter=db_request.user,
organization_name=team.organization.name,
team_name=team.name,
)
]
assert db_request.session.flash.calls == [
pretend.call("Removed from team", queue="success")
]
assert isinstance(result, HTTPSeeOther)
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_team_role_not_a_member(
self,
db_request,
organization_service,
user_service,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
other_team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
manager = UserFactory.create(username="manager")
not_a_member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=manager,
role_name=OrganizationRoleType.Manager,
)
OrganizationRoleFactory.create(
organization=organization,
user=not_a_member,
role_name=OrganizationRoleType.Member,
)
other_team_role = TeamRoleFactory.create(
team=other_team,
user=not_a_member,
role_name=TeamRoleType.Member,
)
db_request.method = "POST"
db_request.POST = MultiDict({"role_id": other_team_role.id})
db_request.user = owner
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/")
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.delete_team_role()
assert organization_service.get_team_roles(team.id) == []
assert db_request.session.flash.calls == [
pretend.call("Could not find member", queue="error")
]
assert isinstance(result, HTTPSeeOther)
@pytest.mark.usefixtures("_enable_organizations")
def test_delete_team_role_not_a_manager(
self,
db_request,
organization_service,
):
organization = OrganizationFactory.create()
team = TeamFactory(organization=organization)
owner = UserFactory.create(username="owner")
not_a_manager = UserFactory.create(username="manager")
member = UserFactory.create(username="user")
OrganizationRoleFactory.create(
organization=organization,
user=owner,
role_name=OrganizationRoleType.Owner,
)
OrganizationRoleFactory.create(
organization=organization,
user=not_a_manager,
role_name=OrganizationRoleType.Member,
)
OrganizationRoleFactory.create(
organization=organization,
user=member,
role_name=OrganizationRoleType.Member,
)
role = TeamRoleFactory.create(
team=team,
user=member,
role_name=TeamRoleType.Member,
)
db_request.method = "POST"
db_request.POST = MultiDict({"role_id": role.id})
db_request.user = not_a_manager
db_request.has_permission = lambda *a, **kw: False
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/")
view = team_views.ManageTeamRolesViews(team, db_request)
result = view.delete_team_role()
assert organization_service.get_team_roles(team.id) == [role]
assert db_request.session.flash.calls == [
pretend.call("Cannot remove other people from the team", queue="error")
]
assert isinstance(result, HTTPSeeOther)
| TestManageTeamRoles |
python | FactoryBoy__factory_boy | tests/test_fuzzy.py | {
"start": 8081,
"end": 10236
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Setup useful constants
cls.jan1 = datetime.date(2013, 1, 1)
cls.jan3 = datetime.date(2013, 1, 3)
cls.jan31 = datetime.date(2013, 1, 31)
def test_accurate_definition(self):
"""Tests all ways of defining a FuzzyDate."""
fuzz = fuzzy.FuzzyDate(self.jan1, self.jan31)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan31)
def test_partial_definition(self):
"""Test defining a FuzzyDate without passing an end date."""
with utils.mocked_date_today(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDate(self.jan1)
for _i in range(20):
res = utils.evaluate_declaration(fuzz)
self.assertLessEqual(self.jan1, res)
self.assertLessEqual(res, self.jan3)
def test_invalid_definition(self):
with self.assertRaises(ValueError):
fuzzy.FuzzyDate(self.jan31, self.jan1)
def test_invalid_partial_definition(self):
with utils.mocked_date_today(self.jan1, fuzzy):
with self.assertRaises(ValueError):
fuzzy.FuzzyDate(self.jan31)
def test_biased(self):
"""Tests a FuzzyDate with a biased random.randint."""
fake_randint = lambda low, high: (low + high) // 2
fuzz = fuzzy.FuzzyDate(self.jan1, self.jan31)
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.date(2013, 1, 16), res)
def test_biased_partial(self):
"""Tests a FuzzyDate with a biased random and implicit upper bound."""
with utils.mocked_date_today(self.jan3, fuzzy):
fuzz = fuzzy.FuzzyDate(self.jan1)
fake_randint = lambda low, high: (low + high) // 2
with mock.patch('factory.random.randgen.randint', fake_randint):
res = utils.evaluate_declaration(fuzz)
self.assertEqual(datetime.date(2013, 1, 2), res)
| FuzzyDateTestCase |
python | tornadoweb__tornado | tornado/platform/asyncio.py | {
"start": 10953,
"end": 16895
} | class ____(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop.
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
.. versionchanged:: 6.2
Support explicit ``asyncio_loop`` argument
for specifying the asyncio loop to attach to,
rather than always creating a new one with the default policy.
.. versionchanged:: 5.0
When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
the current `asyncio` event loop.
.. deprecated:: 5.0
Now used automatically when appropriate; it is no longer necessary
to refer to this class directly.
"""
def initialize(self, **kwargs: Any) -> None: # type: ignore
self.is_current = False
loop = None
if "asyncio_loop" not in kwargs:
kwargs["asyncio_loop"] = loop = asyncio.new_event_loop()
try:
super().initialize(**kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
if loop is not None:
loop.close()
raise
def close(self, all_fds: bool = False) -> None:
if self.is_current:
self._clear_current()
super().close(all_fds=all_fds)
def _make_current(self) -> None:
if not self.is_current:
try:
self.old_asyncio = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
self.old_asyncio = None # type: ignore
self.is_current = True
asyncio.set_event_loop(self.asyncio_loop)
def _clear_current_hook(self) -> None:
if self.is_current:
asyncio.set_event_loop(self.old_asyncio)
self.is_current = False
def to_tornado_future(asyncio_future: asyncio.Future) -> asyncio.Future:
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now a no-op.
"""
return asyncio_future
def to_asyncio_future(tornado_future: asyncio.Future) -> asyncio.Future:
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
.. deprecated:: 5.0
Tornado ``Futures`` have been merged with `asyncio.Future`,
so this method is now equivalent to `tornado.gen.convert_yielded`.
"""
return convert_yielded(tornado_future)
_AnyThreadEventLoopPolicy = None
def __getattr__(name: str) -> typing.Any:
# The event loop policy system is deprecated in Python 3.14; simply accessing
# the name asyncio.DefaultEventLoopPolicy will raise a warning. Lazily create
# the AnyThreadEventLoopPolicy class so that the warning is only raised if
# the policy is used.
if name != "AnyThreadEventLoopPolicy":
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
global _AnyThreadEventLoopPolicy
if _AnyThreadEventLoopPolicy is None:
if sys.platform == "win32" and hasattr(
asyncio, "WindowsSelectorEventLoopPolicy"
):
# "Any thread" and "selector" should be orthogonal, but there's not a clean
# interface for composing policies so pick the right base.
_BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
else:
_BasePolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
"""Event loop policy that allows loop creation on any thread.
The default `asyncio` event loop policy only automatically creates
event loops in the main threads. Other threads must create event
loops explicitly or `asyncio.get_event_loop` (and therefore
`.IOLoop.current`) will fail. Installing this policy allows event
loops to be created automatically on any thread, matching the
behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
Usage::
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
.. versionadded:: 5.0
.. deprecated:: 6.2
``AnyThreadEventLoopPolicy`` affects the implicit creation
of an event loop, which is deprecated in Python 3.10 and
will be removed in a future version of Python. At that time
``AnyThreadEventLoopPolicy`` will no longer be useful.
If you are relying on it, use `asyncio.new_event_loop`
or `asyncio.run` explicitly in any non-main threads that
need event loops.
"""
def __init__(self) -> None:
super().__init__()
warnings.warn(
"AnyThreadEventLoopPolicy is deprecated, use asyncio.run "
"or asyncio.new_event_loop instead",
DeprecationWarning,
stacklevel=2,
)
def get_event_loop(self) -> asyncio.AbstractEventLoop:
try:
return super().get_event_loop()
except RuntimeError:
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
_AnyThreadEventLoopPolicy = AnyThreadEventLoopPolicy
return _AnyThreadEventLoopPolicy
| AsyncIOLoop |
python | doocs__leetcode | solution/0700-0799/0731.My Calendar II/Solution.py | {
"start": 0,
"end": 601
} | class ____:
def __init__(self):
self.sd = SortedDict()
def book(self, startTime: int, endTime: int) -> bool:
self.sd[startTime] = self.sd.get(startTime, 0) + 1
self.sd[endTime] = self.sd.get(endTime, 0) - 1
s = 0
for v in self.sd.values():
s += v
if s > 2:
self.sd[startTime] -= 1
self.sd[endTime] += 1
return False
return True
# Your MyCalendarTwo object will be instantiated and called as such:
# obj = MyCalendarTwo()
# param_1 = obj.book(startTime,endTime)
| MyCalendarTwo |
python | ray-project__ray | python/ray/train/tests/test_predictor.py | {
"start": 517,
"end": 1068
} | class ____(Preprocessor):
def __init__(self, multiplier=2):
self.multiplier = multiplier
self.inputs = []
self.outputs = []
self.id = uuid.uuid4()
def fit_status(self) -> Preprocessor.FitStatus:
"""Override fit status to test full transform_batch path."""
return Preprocessor.FitStatus.FITTED
def _transform_pandas(self, df: pd.DataFrame) -> pd.DataFrame:
self.inputs.append(df)
rst = df * self.multiplier
self.outputs.append(rst)
return rst
| DummyPreprocessor |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/swimmer_v4.py | {
"start": 159,
"end": 3626
} | class ____(MujocoEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": 25,
}
def __init__(
self,
forward_reward_weight=1.0,
ctrl_cost_weight=1e-4,
reset_noise_scale=0.1,
exclude_current_positions_from_observation=True,
**kwargs,
):
utils.EzPickle.__init__(
self,
forward_reward_weight,
ctrl_cost_weight,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs,
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
if exclude_current_positions_from_observation:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64
)
else:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(10,), dtype=np.float64
)
MujocoEnv.__init__(
self, "swimmer.xml", 4, observation_space=observation_space, **kwargs
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
def step(self, action):
xy_position_before = self.data.qpos[0:2].copy()
self.do_simulation(action, self.frame_skip)
xy_position_after = self.data.qpos[0:2].copy()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
forward_reward = self._forward_reward_weight * x_velocity
ctrl_cost = self.control_cost(action)
observation = self._get_obs()
reward = forward_reward - ctrl_cost
info = {
"reward_fwd": forward_reward,
"reward_ctrl": -ctrl_cost,
"x_position": xy_position_after[0],
"y_position": xy_position_after[1],
"distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
"x_velocity": x_velocity,
"y_velocity": y_velocity,
"forward_reward": forward_reward,
}
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return observation, reward, False, False, info
def _get_obs(self):
position = self.data.qpos.flat.copy()
velocity = self.data.qvel.flat.copy()
if self._exclude_current_positions_from_observation:
position = position[2:]
observation = np.concatenate([position, velocity]).ravel()
return observation
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
| SwimmerEnv |
python | huggingface__transformers | tests/models/whisper/test_modeling_whisper.py | {
"start": 238989,
"end": 242559
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (WhisperForAudioClassification,) if is_torch_available() else ()
is_encoder_decoder = False
test_missing_keys = False
def setUp(self):
self.model_tester = WhisperEncoderModelTester(self)
self.config_tester = ConfigTester(self, config_class=WhisperConfig)
self.maxDiff = 3000
def test_config(self):
self.config_tester.run_common_tests()
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_features", "encoder_outputs"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_forward_pass(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_forward_pass_weighted_layer_sum(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs, use_weighted_layer_sum=True)
@unittest.skip(reason="Not applicable for an encoder-only acoustic model")
def test_inputs_embeds(self):
# input embeds is meaningless for an encoder-only acoustic model
pass
# the equivalent test is passing the encoder outputs directly to the model
def test_encoder_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
with torch.no_grad():
outputs = model(**inputs)[0]
encoder = model.encoder
encoder_inputs = {"input_features": inputs["input_features"]}
del inputs["input_features"]
if "attention_mask" in inputs:
encoder_inputs["attention_mask"] = inputs["attention_mask"]
if "output_attentions" in inputs:
encoder_inputs["output_attentions"] = inputs["output_attentions"]
with torch.no_grad():
inputs["encoder_outputs"] = encoder(**encoder_inputs)
outputs_embeds = model(**inputs)[0]
self.assertTrue((outputs_embeds == outputs).all())
# Needs to override as the encoder input embedding is a Conv1d
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Conv1d))
model.set_input_embeddings(torch.nn.Conv1d(10, 10, 3))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Conv1d))
# WhisperEncoder cannot resize token embeddings since it has no tokens embeddings
@unittest.skip(reason="Model has no tokens embeds")
def test_resize_tokens_embeddings(self):
pass
| WhisperEncoderModelTest |
python | Lightning-AI__lightning | .actions/assistant.py | {
"start": 1450,
"end": 14248
} | class ____(Requirement):
strict_cmd = "strict"
def __init__(self, *args: Any, comment: str = "", pip_argument: Optional[str] = None, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.comment = comment
assert pip_argument is None or pip_argument # sanity check that it's not an empty str
self.pip_argument = pip_argument
self.strict = self.strict_cmd in comment.lower()
def adjust(self, unfreeze: str) -> str:
"""Remove version restrictions unless they are strict.
>>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# anything").adjust("none")
'arrow<=1.2.2,>=1.2.0'
>>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# strict").adjust("none")
'arrow<=1.2.2,>=1.2.0 # strict'
>>> _RequirementWithComment('arrow<=1.2.2,>=1.2.0; python_version >= "3.10"', comment="# my name").adjust("all")
'arrow>=1.2.0; python_version >= "3.10"'
>>> _RequirementWithComment("arrow>=1.2.0, <=1.2.2", comment="# strict").adjust("all")
'arrow<=1.2.2,>=1.2.0 # strict'
>>> _RequirementWithComment('arrow; python_version >= "3.10"').adjust("all")
'arrow; python_version >= "3.10"'
>>> _RequirementWithComment("arrow>=1.2.0, <=1.2.2", comment="# cool").adjust("major")
'arrow<2.0,>=1.2.0'
>>> _RequirementWithComment("arrow>=1.2.0, <=1.2.2", comment="# strict").adjust("major")
'arrow<=1.2.2,>=1.2.0 # strict'
>>> _RequirementWithComment('arrow>=1.2.0; python_version >= "3.10"').adjust("major")
'arrow>=1.2.0; python_version >= "3.10"'
>>> _RequirementWithComment("arrow").adjust("major")
'arrow'
"""
out = str(self)
if self.strict:
return f"{out} # {self.strict_cmd}"
specs = [(spec.operator, spec.version) for spec in self.specifier]
if unfreeze == "major":
for operator, version in specs:
if operator in ("<", "<="):
major = Version(version).major
# replace upper bound with major version increased by one
return out.replace(f"{operator}{version}", f"<{major + 1}.0")
elif unfreeze == "all":
for operator, version in specs:
if operator in ("<", "<="):
# drop upper bound
return out.replace(f"{operator}{version},", "")
elif unfreeze != "none":
raise ValueError(f"Unexpected unfreeze: {unfreeze!r} value.")
return out
def _parse_requirements(lines: Iterable[str]) -> Iterator[_RequirementWithComment]:
"""Adapted from `pkg_resources.parse_requirements` to include comments.
>>> txt = ['# ignored', '', 'this # is an', '--piparg', 'example', 'foo # strict', 'thing', '-r different/file.txt']
>>> [r.adjust('none') for r in _parse_requirements(txt)]
['this', 'example', 'foo # strict', 'thing']
"""
pip_argument = None
for line in lines:
line = line.strip()
if not line or line.startswith("#"):
continue
# Drop comments -- a hash without a space may be in a URL.
if " #" in line:
comment_pos = line.find(" #")
line, comment = line[:comment_pos], line[comment_pos:]
else:
comment = ""
# If there's a pip argument, save it
if line.startswith("--"):
pip_argument = line
continue
if line.startswith("-r "):
# linked requirement files are unsupported
continue
yield _RequirementWithComment(line, comment=comment, pip_argument=pip_argument)
pip_argument = None
def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str = "all") -> list[str]:
"""Loading requirements from a file.
>>> path_req = os.path.join(_PROJECT_ROOT, "requirements")
>>> load_requirements(path_req, "docs.txt", unfreeze="major") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['sphinx<...]
"""
assert unfreeze in {"none", "major", "all"}
path = Path(path_dir) / file_name
if not path.exists():
logging.warning(f"Folder {path_dir} does not have any base requirements.")
return []
assert path.exists(), (path_dir, file_name, path)
text = path.read_text().splitlines()
return [req.adjust(unfreeze) for req in _parse_requirements(text)]
def load_readme_description(path_dir: str, homepage: str, version: str) -> str:
"""Load readme as decribtion.
>>> load_readme_description(_PROJECT_ROOT, "", "") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
'...PyTorch Lightning is just organized PyTorch...'
"""
path_readme = os.path.join(path_dir, "README.md")
with open(path_readme, encoding="utf-8") as fopen:
text = fopen.read()
# drop images from readme
text = text.replace(
"", ""
)
# https://github.com/Lightning-AI/lightning/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
github_source_url = os.path.join(homepage, "raw", version)
# replace relative repository path to absolute link to the release
# do not replace all "docs" as in the readme we reger some other sources with particular path to docs
text = text.replace(
"docs/source-pytorch/_static/", f"{os.path.join(github_source_url, 'docs/source-app/_static/')}"
)
# readthedocs badge
text = text.replace("badge/?version=stable", f"badge/?version={version}")
text = text.replace("pytorch-lightning.readthedocs.io/en/stable/", f"pytorch-lightning.readthedocs.io/en/{version}")
# codecov badge
text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
# github actions badge
text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
# azure pipelines badge
text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
skip_begin = r"<!-- following section will be skipped from PyPI description -->"
skip_end = r"<!-- end skipping PyPI description -->"
# todo: wrap content as commented description
return re.sub(rf"{skip_begin}.+?{skip_end}", "<!-- -->", text, flags=re.IGNORECASE + re.DOTALL)
# # https://github.com/Borda/pytorch-lightning/releases/download/1.1.0a6/codecov_badge.png
# github_release_url = os.path.join(homepage, "releases", "download", version)
# # download badge and replace url with local file
# text = _parse_for_badge(text, github_release_url)
def distribute_version(src_folder: str, ver_file: str = "version.info") -> None:
"""Copy the global version to all packages."""
ls_ver = glob.glob(os.path.join(src_folder, "*", "__version__.py"))
ver_template = os.path.join(src_folder, ver_file)
for fpath in ls_ver:
fpath = os.path.join(os.path.dirname(fpath), ver_file)
print("Distributing the version to", fpath)
if os.path.isfile(fpath):
os.remove(fpath)
shutil.copy2(ver_template, fpath)
def _load_aggregate_requirements(req_dir: str = "requirements", freeze_requirements: bool = False) -> None:
"""Load all base requirements from all particular packages and prune duplicates.
>>> _load_aggregate_requirements(os.path.join(_PROJECT_ROOT, "requirements"))
"""
requires = [
load_requirements(d, unfreeze="none" if freeze_requirements else "major")
for d in glob.glob(os.path.join(req_dir, "*"))
# skip empty folder (git artifacts), and resolving Will's special issue
if os.path.isdir(d) and len(glob.glob(os.path.join(d, "*"))) > 0 and not os.path.basename(d).startswith("_")
]
if not requires:
return
# TODO: add some smarter version aggregation per each package
requires = sorted(set(chain(*requires)))
with open(os.path.join(req_dir, "base.txt"), "w") as fp:
fp.writelines([ln + os.linesep for ln in requires] + [os.linesep])
def _retrieve_files(directory: str, *ext: str) -> list[str]:
all_files = []
for root, _, files in os.walk(directory):
for fname in files:
if not ext or any(os.path.split(fname)[1].lower().endswith(e) for e in ext):
all_files.append(os.path.join(root, fname))
return all_files
def _replace_imports(lines: list[str], mapping: list[tuple[str, str]], lightning_by: str = "") -> list[str]:
"""Replace imports of standalone package to lightning.
>>> lns = [
... '"lightning_app"',
... "lightning_app",
... "lightning_app/",
... "delete_cloud_lightning_apps",
... "from lightning_app import",
... "lightning_apps = []",
... "lightning_app and pytorch_lightning are ours",
... "def _lightning_app():",
... ":class:`~lightning_app.core.flow.LightningFlow`",
... "http://pytorch_lightning.ai",
... "from lightning import __version__",
... "@lightning.ai"
... ]
>>> mapping = [("lightning_app", "lightning.app"), ("pytorch_lightning", "lightning.pytorch")]
>>> _replace_imports(lns, mapping, lightning_by="lightning_fabric") # doctest: +NORMALIZE_WHITESPACE
['"lightning.app"', \
'lightning.app', \
'lightning_app/', \
'delete_cloud_lightning_apps', \
'from lightning.app import', \
'lightning_apps = []', \
'lightning.app and lightning.pytorch are ours', \
'def _lightning_app():', \
':class:`~lightning.app.core.flow.LightningFlow`', \
'http://pytorch_lightning.ai', \
'from lightning_fabric import __version__', \
'@lightning.ai']
"""
out = lines[:]
for source_import, target_import in mapping:
for i, ln in enumerate(out):
out[i] = re.sub(
rf"([^_/@]|^){source_import}([^_\w/]|$)",
rf"\1{target_import}\2",
ln,
)
if lightning_by: # in addition, replace base package
out[i] = out[i].replace("from lightning import ", f"from {lightning_by} import ")
out[i] = out[i].replace("import lightning ", f"import {lightning_by} ")
return out
def copy_replace_imports(
source_dir: str,
source_imports: Sequence[str],
target_imports: Sequence[str],
target_dir: Optional[str] = None,
lightning_by: str = "",
) -> None:
"""Copy package content with import adjustments."""
print(f"Replacing imports: {locals()}")
assert len(source_imports) == len(target_imports), (
"source and target imports must have the same length, "
f"source: {len(source_imports)}, target: {len(target_imports)}"
)
if target_dir is None:
target_dir = source_dir
ls = _retrieve_files(source_dir)
for fp in ls:
fp_new = fp.replace(source_dir, target_dir)
_, ext = os.path.splitext(fp)
if ext in (".png", ".jpg", ".ico"):
os.makedirs(dirname(fp_new), exist_ok=True)
if not isfile(fp_new):
shutil.copy(fp, fp_new)
continue
if ext in (".pyc",):
continue
# Try to parse everything else
with open(fp, encoding="utf-8") as fopen:
try:
lines = fopen.readlines()
except UnicodeDecodeError:
# a binary file, skip
print(f"Skipped replacing imports for {fp}")
continue
lines = _replace_imports(lines, list(zip(source_imports, target_imports)), lightning_by=lightning_by)
os.makedirs(os.path.dirname(fp_new), exist_ok=True)
with open(fp_new, "w", encoding="utf-8") as fopen:
fopen.writelines(lines)
def create_mirror_package(source_dir: str, package_mapping: dict[str, str]) -> None:
"""Create a mirror package with adjusted imports."""
# replace imports and copy the code
mapping = package_mapping.copy()
mapping.pop("lightning", None) # pop this key to avoid replacing `lightning` to `lightning.lightning`
mapping = {f"lightning.{sp}": sl for sp, sl in mapping.items()}
for pkg_from, pkg_to in mapping.items():
source_imports, target_imports = zip(*mapping.items())
copy_replace_imports(
source_dir=os.path.join(source_dir, pkg_from.replace(".", os.sep)),
# pytorch_lightning uses lightning_fabric, so we need to replace all imports for all directories
source_imports=source_imports,
target_imports=target_imports,
target_dir=os.path.join(source_dir, pkg_to.replace(".", os.sep)),
lightning_by=pkg_from,
)
| _RequirementWithComment |
python | pandas-dev__pandas | pandas/io/formats/format.py | {
"start": 28580,
"end": 38311
} | class ____:
"""Class for creating dataframe output in multiple formats.
Called in pandas.core.generic.NDFrame:
- to_csv
- to_latex
Called in pandas.DataFrame:
- to_html
- to_string
Parameters
----------
fmt : DataFrameFormatter
Formatter with the formatting options.
"""
def __init__(self, fmt: DataFrameFormatter) -> None:
self.fmt = fmt
def to_html(
self,
buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
classes: str | list | tuple | None = None,
notebook: bool = False,
border: int | bool | None = None,
table_id: str | None = None,
render_links: bool = False,
) -> str | None:
"""
Render a DataFrame to a html table.
Parameters
----------
buf : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a string ``write()`` function. If None, the result is
returned as a string.
encoding : str, default “utf-8”
Set character encoding.
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int or bool
When an integer value is provided, it sets the border attribute in
the opening tag, specifying the thickness of the border.
If ``False`` or ``0`` is passed, the border attribute will not
be present in the ``<table>`` tag.
The default value for this parameter is governed by
``pd.options.display.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
"""
from pandas.io.formats.html import (
HTMLFormatter,
NotebookFormatter,
)
Klass = NotebookFormatter if notebook else HTMLFormatter
html_formatter = Klass(
self.fmt,
classes=classes,
border=border,
table_id=table_id,
render_links=render_links,
)
string = html_formatter.to_string()
return save_to_buffer(string, buf=buf, encoding=encoding)
def to_string(
self,
buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
line_width: int | None = None,
) -> str | None:
"""
Render a DataFrame to a console-friendly tabular output.
Parameters
----------
buf : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a string ``write()`` function. If None, the result is
returned as a string.
encoding: str, default “utf-8”
Set character encoding.
line_width : int, optional
Width to wrap a line in characters.
"""
from pandas.io.formats.string import StringFormatter
string_formatter = StringFormatter(self.fmt, line_width=line_width)
string = string_formatter.to_string()
return save_to_buffer(string, buf=buf, encoding=encoding)
def to_csv(
self,
path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
encoding: str | None = None,
sep: str = ",",
columns: Sequence[Hashable] | None = None,
index_label: IndexLabel | None = None,
mode: str = "w",
compression: CompressionOptions = "infer",
quoting: int | None = None,
quotechar: str = '"',
lineterminator: str | None = None,
chunksize: int | None = None,
date_format: str | None = None,
doublequote: bool = True,
escapechar: str | None = None,
errors: str = "strict",
storage_options: StorageOptions | None = None,
) -> str | None:
"""
Render dataframe as comma-separated file.
"""
from pandas.io.formats.csvs import CSVFormatter
if path_or_buf is None:
created_buffer = True
path_or_buf = StringIO()
else:
created_buffer = False
csv_formatter = CSVFormatter(
path_or_buf=path_or_buf,
lineterminator=lineterminator,
sep=sep,
encoding=encoding,
errors=errors,
compression=compression,
quoting=quoting,
cols=columns,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
storage_options=storage_options,
formatter=self.fmt,
)
csv_formatter.save()
if created_buffer:
assert isinstance(path_or_buf, StringIO)
content = path_or_buf.getvalue()
path_or_buf.close()
return content
return None
def save_to_buffer(
string: str,
buf: FilePath | WriteBuffer[str] | None = None,
encoding: str | None = None,
) -> str | None:
"""
Perform serialization. Write to buf or return as string if buf is None.
"""
with _get_buffer(buf, encoding=encoding) as fd:
fd.write(string)
if buf is None:
# error: "WriteBuffer[str]" has no attribute "getvalue"
return fd.getvalue() # type: ignore[attr-defined]
return None
@contextmanager
def _get_buffer(
buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
) -> Generator[WriteBuffer[str]] | Generator[StringIO]:
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
"""
if buf is not None:
buf = stringify_path(buf)
else:
buf = StringIO()
if encoding is None:
encoding = "utf-8"
elif not isinstance(buf, str):
raise ValueError("buf is not a file name and encoding is specified.")
if hasattr(buf, "write"):
# Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
# StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
yield buf # type: ignore[misc]
elif isinstance(buf, str):
check_parent_directory(str(buf))
with open(buf, "w", encoding=encoding, newline="") as f:
# GH#30034 open instead of codecs.open prevents a file leak
# if we have an invalid encoding argument.
# newline="" is needed to roundtrip correctly on
# windows test_to_latex_filename
yield f
else:
raise TypeError("buf is not a file name and it has no write method")
# ----------------------------------------------------------------------
# Array formatters
def format_array(
values: ArrayLike,
formatter: Callable | None,
float_format: FloatFormatType | None = None,
na_rep: str = "NaN",
digits: int | None = None,
space: str | int | None = None,
justify: str = "right",
decimal: str = ".",
leading_space: bool | None = True,
quoting: int | None = None,
fallback_formatter: Callable | None = None,
) -> list[str]:
"""
Format an array for printing.
Parameters
----------
values : np.ndarray or ExtensionArray
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional, default True
Whether the array should be formatted with a leading space.
When an array as a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._get_values_for_csv), we don't want the
leading space since it should be left-aligned.
fallback_formatter
Returns
-------
List[str]
"""
fmt_klass: type[_GenericArrayFormatter]
if lib.is_np_dtype(values.dtype, "M"):
fmt_klass = _Datetime64Formatter
values = cast(DatetimeArray, values)
elif isinstance(values.dtype, DatetimeTZDtype):
fmt_klass = _Datetime64TZFormatter
values = cast(DatetimeArray, values)
elif lib.is_np_dtype(values.dtype, "m"):
fmt_klass = _Timedelta64Formatter
values = cast(TimedeltaArray, values)
elif isinstance(values.dtype, ExtensionDtype):
fmt_klass = _ExtensionArrayFormatter
elif lib.is_np_dtype(values.dtype, "fc"):
fmt_klass = FloatArrayFormatter
elif lib.is_np_dtype(values.dtype, "iu"):
fmt_klass = _IntArrayFormatter
else:
fmt_klass = _GenericArrayFormatter
if space is None:
space = 12
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(
values,
digits=digits,
na_rep=na_rep,
float_format=float_format,
formatter=formatter,
space=space,
justify=justify,
decimal=decimal,
leading_space=leading_space,
quoting=quoting,
fallback_formatter=fallback_formatter,
)
return fmt_obj.get_result()
| DataFrameRenderer |
python | wandb__wandb | wandb/sdk/launch/runner/local_container.py | {
"start": 3091,
"end": 10358
} | class ____(AbstractRunner):
"""Runner class, uses a project to create a LocallySubmittedRun."""
def __init__(
self,
api: "Api",
backend_config: Dict[str, Any],
environment: AbstractEnvironment,
registry: AbstractRegistry,
) -> None:
super().__init__(api, backend_config)
self.environment = environment
self.registry = registry
def _populate_docker_args(
self, launch_project: LaunchProject, image_uri: str
) -> Dict[str, Any]:
docker_args: Dict[str, Any] = launch_project.fill_macros(image_uri).get(
"local-container", {}
)
if _is_wandb_local_uri(self._api.settings("base_url")):
if sys.platform == "win32":
docker_args["net"] = "host"
else:
docker_args["network"] = "host"
if sys.platform == "linux" or sys.platform == "linux2":
docker_args["add-host"] = "host.docker.internal:host-gateway"
base_image = launch_project.job_base_image
if base_image is not None:
# Mount code into the container and set the working directory.
if "volume" not in docker_args:
docker_args["volume"] = []
docker_args["volume"].append(
f"{launch_project.project_dir}:{CODE_MOUNT_DIR}"
)
docker_args["workdir"] = CODE_MOUNT_DIR
return docker_args
async def run(
self,
launch_project: LaunchProject,
image_uri: str,
) -> Optional[AbstractRun]:
docker_args = self._populate_docker_args(launch_project, image_uri)
synchronous: bool = self.backend_config[PROJECT_SYNCHRONOUS]
env_vars = launch_project.get_env_vars_dict(
self._api, MAX_ENV_LENGTHS[self.__class__.__name__]
)
# When running against local port, need to swap to local docker host
if (
_is_wandb_local_uri(self._api.settings("base_url"))
and sys.platform == "darwin"
):
_, _, port = self._api.settings("base_url").split(":")
env_vars["WANDB_BASE_URL"] = f"http://host.docker.internal:{port}"
elif _is_wandb_dev_uri(self._api.settings("base_url")):
env_vars["WANDB_BASE_URL"] = "http://host.docker.internal:9001"
if launch_project.docker_image or launch_project.job_base_image:
try:
pull_docker_image(image_uri)
except Exception as e:
wandb.termwarn(f"Error attempting to pull docker image {image_uri}")
if not docker_image_exists(image_uri):
raise LaunchError(
f"Failed to pull docker image {image_uri} with error: {e}"
)
entrypoint = launch_project.get_job_entry_point()
entry_cmd = None if entrypoint is None else entrypoint.command
command_str = " ".join(
get_docker_command(
image_uri,
env_vars,
docker_args=docker_args,
entry_cmd=entry_cmd,
additional_args=launch_project.override_args,
)
).strip()
sanitized_cmd_str = sanitize_wandb_api_key(command_str)
_msg = f"{LOG_PREFIX}Launching run in docker with command: {sanitized_cmd_str}"
wandb.termlog(_msg)
run = _run_entry_point(command_str, launch_project.project_dir)
if synchronous:
await run.wait()
return run
def _run_entry_point(command: str, work_dir: Optional[str]) -> AbstractRun:
"""Run an entry point command in a subprocess.
Arguments:
command: Entry point command to run
work_dir: Working directory in which to run the command
Returns:
An instance of `LocalSubmittedRun`
"""
if work_dir is None:
work_dir = os.getcwd()
env = os.environ.copy()
run = LocalSubmittedRun()
thread = threading.Thread(
target=_thread_process_runner,
args=(run, ["bash", "-c", command], work_dir, env),
)
run.set_thread(thread)
thread.start()
return run
def _thread_process_runner(
run: LocalSubmittedRun, args: List[str], work_dir: str, env: Dict[str, str]
) -> None:
# cancel was called before we started the subprocess
if run._terminate_flag:
return
# TODO: Make this async
process = subprocess.Popen(
args,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1,
cwd=work_dir,
env=env,
)
run.set_command_proc(process)
run._stdout = ""
while True:
# the agent thread could set the terminate flag
if run._terminate_flag:
process.terminate() # type: ignore
chunk = os.read(process.stdout.fileno(), 4096) # type: ignore
if not chunk:
break
index = chunk.find(b"\r")
decoded_chunk = None
while not decoded_chunk:
try:
decoded_chunk = chunk.decode()
except UnicodeDecodeError:
# Multi-byte character cut off, try to get the rest of it
chunk += os.read(process.stdout.fileno(), 1) # type: ignore
if index != -1:
run._stdout += decoded_chunk
print(chunk.decode(), end="")
else:
run._stdout += decoded_chunk + "\r"
print(chunk.decode(), end="\r")
def get_docker_command(
image: str,
env_vars: Dict[str, str],
entry_cmd: Optional[List[str]] = None,
docker_args: Optional[Dict[str, Any]] = None,
additional_args: Optional[List[str]] = None,
) -> List[str]:
"""Construct the docker command using the image and docker args.
Arguments:
image: a Docker image to be run
env_vars: a dictionary of environment variables for the command
entry_cmd: the entry point command to run
docker_args: a dictionary of additional docker args for the command
"""
docker_path = "docker"
cmd: List[Any] = [docker_path, "run", "--rm"]
# hacky handling of env vars, needs to be improved
for env_key, env_value in env_vars.items():
cmd += ["-e", f"{shlex.quote(env_key)}={shlex.quote(env_value)}"]
if docker_args:
for name, value in docker_args.items():
if len(name) == 1:
prefix = "-" + shlex.quote(name)
else:
prefix = "--" + shlex.quote(name)
if isinstance(value, list):
for v in value:
cmd += [prefix, shlex.quote(str(v))]
elif isinstance(value, bool) and value:
cmd += [prefix]
else:
cmd += [prefix, shlex.quote(str(value))]
if entry_cmd:
cmd += ["--entrypoint", entry_cmd[0]]
cmd += [shlex.quote(image)]
if entry_cmd and len(entry_cmd) > 1:
cmd += entry_cmd[1:]
if additional_args:
cmd += additional_args
return cmd
def join(split_command: List[str]) -> str:
"""Return a shell-escaped string from *split_command*."""
return " ".join(shlex.quote(arg) for arg in split_command)
| LocalContainerRunner |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 5104,
"end": 5417
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"ERROR",
"FAILURE",
"INACTIVE",
"IN_PROGRESS",
"PENDING",
"QUEUED",
"SUCCESS",
"WAITING",
)
| DeploymentStatusState |
python | pandas-dev__pandas | pandas/tests/groupby/test_grouping.py | {
"start": 5495,
"end": 32566
} | class ____:
@pytest.mark.parametrize(
"index",
[
Index(list("abcde")),
Index(np.arange(5)),
Index(np.arange(5, dtype=float)),
date_range("2020-01-01", periods=5),
period_range("2020-01-01", periods=5),
],
)
def test_grouper_index_types(self, index):
# related GH5375
# groupby misbehaving when using a Floatlike index
df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB"), index=index)
df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
df.index = df.index[::-1]
df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
# GH 7885
# with level and freq specified in a Grouper
d0 = date.today() - timedelta(days=14)
dates = date_range(d0, date.today())
date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"])
df = DataFrame(np.random.default_rng(2).integers(0, 100, 225), index=date_index)
# Check string level
expected = (
df.reset_index()
.groupby([Grouper(key="foo", freq="W"), Grouper(key="bar", freq="W")])
.sum()
)
# reset index changes columns dtype to object
expected.columns = Index([0], dtype="int64")
result = df.groupby(
[Grouper(level="foo", freq="W"), Grouper(level="bar", freq="W")]
).sum()
tm.assert_frame_equal(result, expected)
# Check integer level
result = df.groupby(
[Grouper(level=0, freq="W"), Grouper(level=1, freq="W")]
).sum()
tm.assert_frame_equal(result, expected)
def test_grouper_creation_bug(self):
# GH 8795
df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]})
g = df.groupby("A")
expected = g.sum()
g = df.groupby(Grouper(key="A"))
result = g.sum()
tm.assert_frame_equal(result, expected)
result = g.apply(lambda x: x.sum())
tm.assert_frame_equal(result, expected)
def test_grouper_creation_bug2(self):
# GH14334
# Grouper(key=...) may be passed in a list
df = DataFrame(
{"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]}
)
# Group by single column
expected = df.groupby("A").sum()
g = df.groupby([Grouper(key="A")])
result = g.sum()
tm.assert_frame_equal(result, expected)
# Group by two columns
# using a combination of strings and Grouper objects
expected = df.groupby(["A", "B"]).sum()
# Group with two Grouper objects
g = df.groupby([Grouper(key="A"), Grouper(key="B")])
result = g.sum()
tm.assert_frame_equal(result, expected)
# Group with a string and a Grouper object
g = df.groupby(["A", Grouper(key="B")])
result = g.sum()
tm.assert_frame_equal(result, expected)
# Group with a Grouper object and a string
g = df.groupby([Grouper(key="A"), "B"])
result = g.sum()
tm.assert_frame_equal(result, expected)
def test_grouper_creation_bug3(self, unit):
# GH8866
dti = date_range("20130101", periods=2, unit=unit)
mi = MultiIndex.from_product(
[list("ab"), range(2), dti],
names=["one", "two", "three"],
)
ser = Series(
np.arange(8, dtype="int64"),
index=mi,
)
result = ser.groupby(Grouper(level="three", freq="ME")).sum()
exp_dti = pd.DatetimeIndex(
[Timestamp("2013-01-31")], freq="ME", name="three"
).as_unit(unit)
expected = Series(
[28],
index=exp_dti,
)
tm.assert_series_equal(result, expected)
# just specifying a level breaks
result = ser.groupby(Grouper(level="one")).sum()
expected = ser.groupby(level="one").sum()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", [False, True])
def test_grouper_returning_tuples(self, func):
# GH 22257 , both with dict and with callable
df = DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
mapping = dict(zip(range(4), [("C", 5), ("D", 6)] * 2))
if func:
gb = df.groupby(by=lambda idx: mapping[idx], sort=False)
else:
gb = df.groupby(by=mapping, sort=False)
name, expected = next(iter(gb))
assert name == ("C", 5)
result = gb.get_group(name)
tm.assert_frame_equal(result, expected)
def test_grouper_column_and_index(self):
# GH 14327
# Grouping a multi-index frame by a column and an index level should
# be equivalent to resetting the index and grouping by two columns
idx = MultiIndex.from_tuples(
[("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)]
)
idx.names = ["outer", "inner"]
df_multi = DataFrame(
{"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]},
index=idx,
)
result = df_multi.groupby(["B", Grouper(level="inner")]).mean(numeric_only=True)
expected = (
df_multi.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
)
tm.assert_frame_equal(result, expected)
# Test the reverse grouping order
result = df_multi.groupby([Grouper(level="inner"), "B"]).mean(numeric_only=True)
expected = (
df_multi.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
)
tm.assert_frame_equal(result, expected)
# Grouping a single-index frame by a column and the index should
# be equivalent to resetting the index and grouping by two columns
df_single = df_multi.reset_index("outer")
result = df_single.groupby(["B", Grouper(level="inner")]).mean(
numeric_only=True
)
expected = (
df_single.reset_index().groupby(["B", "inner"]).mean(numeric_only=True)
)
tm.assert_frame_equal(result, expected)
# Test the reverse grouping order
result = df_single.groupby([Grouper(level="inner"), "B"]).mean(
numeric_only=True
)
expected = (
df_single.reset_index().groupby(["inner", "B"]).mean(numeric_only=True)
)
tm.assert_frame_equal(result, expected)
def test_groupby_levels_and_columns(self):
# GH9344, GH9049
idx_names = ["x", "y"]
idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
df = DataFrame(np.arange(12).reshape(-1, 3), index=idx)
by_levels = df.groupby(level=idx_names).mean()
# reset_index changes columns dtype to object
by_columns = df.reset_index().groupby(idx_names).mean()
# without casting, by_columns.columns is object-dtype
by_columns.columns = by_columns.columns.astype(np.int64)
tm.assert_frame_equal(by_levels, by_columns)
def test_groupby_categorical_index_and_columns(self, observed):
# GH18432, adapted for GH25871
columns = ["A", "B", "A", "B"]
categories = ["B", "A"]
data = np.array(
[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int
)
cat_columns = CategoricalIndex(columns, categories=categories, ordered=True)
expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int)
expected_columns = CategoricalIndex(
categories, categories=categories, ordered=True
)
# test transposed version
df = DataFrame(data.T, index=cat_columns)
result = df.groupby(level=0, observed=observed).sum()
expected = DataFrame(data=expected_data.T, index=expected_columns)
tm.assert_frame_equal(result, expected)
def test_grouper_getting_correct_binner(self):
# GH 10063
# using a non-time-based grouper and a time-based grouper
# and specifying levels
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product(
[list("ab"), date_range("20130101", periods=80)], names=["one", "two"]
),
)
result = df.groupby(
[Grouper(level="one"), Grouper(level="two", freq="ME")]
).sum()
expected = DataFrame(
{"A": [31, 28, 21, 31, 28, 21]},
index=MultiIndex.from_product(
[list("ab"), date_range("20130101", freq="ME", periods=3)],
names=["one", "two"],
),
)
tm.assert_frame_equal(result, expected)
def test_grouper_iter(self, df):
gb = df.groupby("A")
grouper = gb._grouper
result = sorted(grouper)
expected = ["bar", "foo"]
assert result == expected
def test_empty_groups(self, df):
# see gh-1048
with pytest.raises(ValueError, match="No group keys passed!"):
df.groupby([])
def test_groupby_grouper(self, df):
grouped = df.groupby("A")
grouper = grouped._grouper
result = df.groupby(grouper).mean(numeric_only=True)
expected = grouped.mean(numeric_only=True)
tm.assert_frame_equal(result, expected)
def test_groupby_dict_mapping(self):
# GH #679
s = Series({"T1": 5})
result = s.groupby({"T1": "T2"}).agg("sum")
expected = s.groupby(["T2"]).agg("sum")
tm.assert_series_equal(result, expected)
s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd"))
mapping = {"a": 0, "b": 0, "c": 1, "d": 1}
result = s.groupby(mapping).mean()
result2 = s.groupby(mapping).agg("mean")
exp_key = np.array([0, 0, 1, 1], dtype=np.int64)
expected = s.groupby(exp_key).mean()
expected2 = s.groupby(exp_key).mean()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, result2)
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"index",
[
[0, 1, 2, 3],
["a", "b", "c", "d"],
[Timestamp(2021, 7, 28 + i) for i in range(4)],
],
)
def test_groupby_series_named_with_tuple(self, frame_or_series, index):
# GH 42731
obj = frame_or_series([1, 2, 3, 4], index=index)
groups = Series([1, 0, 1, 0], index=index, name=("a", "a"))
result = obj.groupby(groups).last()
expected = frame_or_series([4, 3])
expected.index.name = ("a", "a")
tm.assert_equal(result, expected)
def test_groupby_grouper_f_sanity_checked(self):
dates = date_range("01-Jan-2013", periods=12, freq="MS")
ts = Series(np.random.default_rng(2).standard_normal(12), index=dates)
# GH51979
# simple check that the passed function doesn't operates on the whole index
msg = "'Timestamp' object is not subscriptable"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda key: key[0:6])
result = ts.groupby(lambda x: x).sum()
expected = ts.groupby(ts.index).sum()
expected.index.freq = None
tm.assert_series_equal(result, expected)
def test_groupby_with_datetime_key(self):
# GH 51158
df = DataFrame(
{
"id": ["a", "b"] * 3,
"b": date_range("2000-01-01", "2000-01-03", freq="9h"),
}
)
grouper = Grouper(key="b", freq="D")
gb = df.groupby([grouper, "id"])
# test number of groups
expected = {
(Timestamp("2000-01-01"), "a"): [0, 2],
(Timestamp("2000-01-01"), "b"): [1],
(Timestamp("2000-01-02"), "a"): [4],
(Timestamp("2000-01-02"), "b"): [3, 5],
}
tm.assert_dict_equal(gb.groups, expected)
# test number of group keys
assert len(gb.groups.keys()) == 4
def test_grouping_error_on_multidim_input(self, df):
msg = "Grouper for '<class 'pandas.DataFrame'>' not 1-dimensional"
with pytest.raises(ValueError, match=msg):
Grouping(df.index, df[["A", "A"]])
def test_multiindex_negative_level(self, multiindex_dataframe_random_data):
# GH 13901
result = multiindex_dataframe_random_data.groupby(level=-1).sum()
expected = multiindex_dataframe_random_data.groupby(level="second").sum()
tm.assert_frame_equal(result, expected)
result = multiindex_dataframe_random_data.groupby(level=-2).sum()
expected = multiindex_dataframe_random_data.groupby(level="first").sum()
tm.assert_frame_equal(result, expected)
result = multiindex_dataframe_random_data.groupby(level=[-2, -1]).sum()
expected = multiindex_dataframe_random_data.sort_index()
tm.assert_frame_equal(result, expected)
result = multiindex_dataframe_random_data.groupby(level=[-1, "first"]).sum()
expected = multiindex_dataframe_random_data.groupby(
level=["second", "first"]
).sum()
tm.assert_frame_equal(result, expected)
def test_agg_with_dict_raises(self, df):
df.columns = np.arange(len(df.columns))
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
df.groupby(1, as_index=False)[2].agg({"Q": np.mean})
def test_multiindex_columns_empty_level(self):
lst = [["count", "values"], ["to filter", ""]]
midx = MultiIndex.from_tuples(lst)
df = DataFrame([[1, "A"]], columns=midx)
msg = "`groups` by one element list returns scalar is deprecated"
grouped = df.groupby("to filter").groups
assert grouped["A"] == [0]
with tm.assert_produces_warning(Pandas4Warning, match=msg):
grouped = df.groupby([("to filter", "")]).groups
assert grouped["A"] == [0]
df = DataFrame([[1, "A"], [2, "B"]], columns=midx)
expected = df.groupby("to filter").groups
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = df.groupby([("to filter", "")]).groups
assert result == expected
df = DataFrame([[1, "A"], [2, "A"]], columns=midx)
expected = df.groupby("to filter").groups
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = df.groupby([("to filter", "")]).groups
tm.assert_dict_equal(result, expected)
def test_groupby_multiindex_tuple(self):
# GH 17979, GH#59179
df = DataFrame(
[[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
)
msg = "`groups` by one element list returns scalar is deprecated"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = df.groupby([("b", 1)]).groups
result = df.groupby(("b", 1)).groups
tm.assert_dict_equal(expected, result)
df2 = DataFrame(
df.values,
columns=MultiIndex.from_arrays(
[["a", "b", "b", "c"], ["d", "d", "e", "e"]]
),
)
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = df2.groupby([("b", "d")]).groups
result = df.groupby(("b", 1)).groups
tm.assert_dict_equal(expected, result)
df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"])
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected = df3.groupby([("b", "d")]).groups
result = df.groupby(("b", 1)).groups
tm.assert_dict_equal(expected, result)
def test_groupby_multiindex_partial_indexing_equivalence(self):
# GH 17977, GH#59179
df = DataFrame(
[[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]),
)
expected_mean = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].mean()
result_mean = df.groupby([("a", 1)])["b"].mean()
tm.assert_frame_equal(expected_mean, result_mean)
expected_sum = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].sum()
result_sum = df.groupby([("a", 1)])["b"].sum()
tm.assert_frame_equal(expected_sum, result_sum)
expected_count = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].count()
result_count = df.groupby([("a", 1)])["b"].count()
tm.assert_frame_equal(expected_count, result_count)
expected_min = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].min()
result_min = df.groupby([("a", 1)])["b"].min()
tm.assert_frame_equal(expected_min, result_min)
expected_max = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].max()
result_max = df.groupby([("a", 1)])["b"].max()
tm.assert_frame_equal(expected_max, result_max)
msg = "`groups` by one element list returns scalar is deprecated"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
expected_groups = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].groups
result_groups = df.groupby([("a", 1)])["b"].groups
tm.assert_dict_equal(expected_groups, result_groups)
def test_groupby_level(self, sort, multiindex_dataframe_random_data, df):
# GH 17537
frame = multiindex_dataframe_random_data
deleveled = frame.reset_index()
result0 = frame.groupby(level=0, sort=sort).sum()
result1 = frame.groupby(level=1, sort=sort).sum()
expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum()
expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum()
expected0.index.name = "first"
expected1.index.name = "second"
assert result0.index.name == "first"
assert result1.index.name == "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
assert result0.index.name == frame.index.names[0]
assert result1.index.name == frame.index.names[1]
# groupby level name
result0 = frame.groupby(level="first", sort=sort).sum()
result1 = frame.groupby(level="second", sort=sort).sum()
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
# raise exception for non-MultiIndex
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
df.groupby(level=1)
def test_groupby_level_index_names(self):
# GH4014 this used to raise ValueError since 'exp'>1 (in py2)
df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index(
"exp"
)
df.groupby(level="exp")
msg = "level name foo is not the name of the index"
with pytest.raises(ValueError, match=msg):
df.groupby(level="foo")
def test_groupby_level_with_nas(self, sort):
# GH 17537
index = MultiIndex(
levels=[[1, 0], [0, 1, 2, 3]],
codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
)
# factorizing doesn't confuse things
s = Series(np.arange(8.0), index=index)
result = s.groupby(level=0, sort=sort).sum()
expected = Series([6.0, 22.0], index=[0, 1])
tm.assert_series_equal(result, expected)
index = MultiIndex(
levels=[[1, 0], [0, 1, 2, 3]],
codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]],
)
# factorizing doesn't confuse things
s = Series(np.arange(8.0), index=index)
result = s.groupby(level=0, sort=sort).sum()
expected = Series([6.0, 18.0], index=[0.0, 1.0])
tm.assert_series_equal(result, expected)
def test_groupby_args(self, multiindex_dataframe_random_data):
# PR8618 and issue 8015
frame = multiindex_dataframe_random_data
msg = "You have to supply one of 'by' and 'level'"
with pytest.raises(TypeError, match=msg):
frame.groupby()
msg = "You have to supply one of 'by' and 'level'"
with pytest.raises(TypeError, match=msg):
frame.groupby(by=None, level=None)
@pytest.mark.parametrize(
"sort,labels",
[
[True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
[False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]],
],
)
def test_level_preserve_order(self, sort, labels, multiindex_dataframe_random_data):
# GH 17537
grouped = multiindex_dataframe_random_data.groupby(level=0, sort=sort)
exp_labels = np.array(labels, np.intp)
tm.assert_almost_equal(grouped._grouper.ids, exp_labels)
def test_grouping_labels(self, multiindex_dataframe_random_data):
grouped = multiindex_dataframe_random_data.groupby(
multiindex_dataframe_random_data.index.get_level_values(0)
)
exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
tm.assert_almost_equal(grouped._grouper.codes[0], exp_labels)
def test_list_grouper_with_nat(self):
# GH 14715, GH#59179
df = DataFrame({"date": date_range("1/1/2011", periods=365, freq="D")})
df.iloc[-1] = pd.NaT
grouper = Grouper(key="date", freq="YS")
msg = "`groups` by one element list returns scalar is deprecated"
# Grouper in a list grouping
gb = df.groupby([grouper])
expected = {Timestamp("2011-01-01"): Index(list(range(364)))}
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = gb.groups
tm.assert_dict_equal(result, expected)
# Test case without a list
result = df.groupby(grouper)
expected = {Timestamp("2011-01-01"): 365}
tm.assert_dict_equal(result.groups, expected)
@pytest.mark.parametrize(
"func,expected",
[
(
"transform",
Series(name=2, dtype=np.float64),
),
(
"agg",
Series(
name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
),
),
(
"apply",
Series(
name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1)
),
),
],
)
def test_evaluate_with_empty_groups(self, func, expected):
# 26208
# test transform'ing empty groups
# (not testing other agg fns, because they return
# different index objects.
df = DataFrame({1: [], 2: []})
g = df.groupby(1, group_keys=True)
result = getattr(g[2], func)(lambda x: x)
tm.assert_series_equal(result, expected)
def test_groupby_apply_empty_with_group_keys_false(self):
# 60471
# test apply'ing empty groups with group_keys False
# (not testing other agg fns, because they return
# different index objects.
df = DataFrame({"A": [], "B": [], "C": []})
g = df.groupby("A", group_keys=False)
result = g.apply(lambda x: x / x.sum())
expected = DataFrame({"B": [], "C": []}, index=None)
tm.assert_frame_equal(result, expected)
def test_groupby_empty(self):
# https://github.com/pandas-dev/pandas/issues/27190
s = Series([], name="name", dtype="float64")
gr = s.groupby([])
result = gr.mean()
expected = s.set_axis(Index([], dtype=np.intp))
tm.assert_series_equal(result, expected)
# check group properties
assert len(gr._grouper.groupings) == 1
tm.assert_numpy_array_equal(
gr._grouper.ids, np.array([], dtype=np.dtype(np.intp))
)
assert gr._grouper.ngroups == 0
# check name
gb = s.groupby(s)
grouper = gb._grouper
result = grouper.names
expected = ["name"]
assert result == expected
def test_groupby_level_index_value_all_na(self):
# issue 20519
df = DataFrame(
[["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"]
).set_index(["A", "B"])
result = df.groupby(level=["A", "B"]).sum()
expected = DataFrame(
data=[],
index=MultiIndex(
levels=[Index(["x"], dtype="str"), Index([], dtype="float64")],
codes=[[], []],
names=["A", "B"],
),
columns=["C"],
dtype="int64",
)
tm.assert_frame_equal(result, expected)
def test_groupby_multiindex_level_empty(self):
# https://github.com/pandas-dev/pandas/issues/31670
df = DataFrame(
[[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"]
)
df = df.set_index(["id", "category"])
empty = df[df.value < 0]
result = empty.groupby("id").sum()
expected = DataFrame(
dtype="float64",
columns=["value"],
index=Index([], dtype=np.int64, name="id"),
)
tm.assert_frame_equal(result, expected)
def test_groupby_tuple_keys_handle_multiindex(self):
# https://github.com/pandas-dev/pandas/issues/21340
df = DataFrame(
{
"num1": [0, 8, 9, 4, 3, 3, 5, 9, 3, 6],
"num2": [3, 8, 6, 4, 9, 2, 1, 7, 0, 9],
"num3": [6, 5, 7, 8, 5, 1, 1, 10, 7, 8],
"category_tuple": [
(0, 1),
(0, 1),
(0, 1),
(0, 4),
(2, 3),
(2, 3),
(2, 3),
(2, 3),
(5,),
(6,),
],
"category_string": list("aaabbbbcde"),
}
)
expected = df.sort_values(by=["category_tuple", "num1"])
result = df.groupby("category_tuple").apply(lambda x: x.sort_values(by="num1"))
expected = expected[result.columns]
tm.assert_frame_equal(result.reset_index(drop=True), expected)
def test_groupby_grouper_immutable_list_item(self):
# GH 26564 - prevent 'ValueError: all keys need to be the same shape'
# when reusing a list of groupers
df1 = DataFrame([["05/29/2019"], ["05/28/2019"]], columns=["date"]).assign(
date=lambda df: pd.to_datetime(df["date"])
)
df2 = DataFrame(columns=["date"]).assign(
date=lambda df: pd.to_datetime(df["date"])
)
groupers = [Grouper(key="date", freq="1D")]
df1.groupby(groupers).head()
# no error
df2.groupby(groupers).head()
# get_group
# --------------------------------
| TestGrouping |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor6.py | {
"start": 1952,
"end": 2132
} | class ____(Generic[_T3, _T4]):
def __init__(self: "Class2[_S2, _S1]", value1: _S1, value2: _S2) -> None: ...
reveal_type(Class2(0, ""), expected_text="Class2[str, int]")
| Class2 |
python | miyuchina__mistletoe | test/test_block_token.py | {
"start": 13930,
"end": 14717
} | class ____(unittest.TestCase):
def test_different_markers(self):
lines = ['- foo\n',
'* bar\n',
'1. baz\n',
'2) spam\n']
l1, l2, l3, l4 = block_token.tokenize(lines)
self.assertIsInstance(l1, block_token.List)
self.assertTrue('foo' in l1)
self.assertIsInstance(l2, block_token.List)
self.assertTrue('bar' in l2)
self.assertIsInstance(l3, block_token.List)
self.assertTrue('baz' in l3)
self.assertIsInstance(l4, block_token.List)
self.assertTrue('spam' in l4)
def test_sublist(self):
lines = ['- foo\n',
' + bar\n']
token, = block_token.tokenize(lines)
self.assertIsInstance(token, block_token.List)
| TestList |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 69551,
"end": 78437
} | class ____(BaseMetricsLayerTestCase, TestCase):
TYPE_MAP = {
"metrics_distributions": "distribution",
"metrics_sets": "set",
"metrics_counters": "counter",
"metrics_gauges": "gauge",
}
ENTITY_MAP = {
"transaction.duration": "metrics_distributions",
"span.duration": "metrics_distributions",
"span.self_time": "metrics_distributions",
"http.response_content_length": "metrics_distributions",
"http.decoded_response_content_length": "metrics_distributions",
"cache.item_size": "metrics_distributions",
"http.response_transfer_size": "metrics_distributions",
"measurements.lcp": "metrics_distributions",
"measurements.fp": "metrics_distributions",
"measurements.fcp": "metrics_distributions",
"measurements.fid": "metrics_distributions",
"measurements.cls": "metrics_distributions",
"measurements.frames_frozen_rate": "metrics_distributions",
"measurements.time_to_initial_display": "metrics_distributions",
"measurements.score.lcp": "metrics_distributions",
"measurements.score.fcp": "metrics_distributions",
"measurements.score.fid": "metrics_distributions",
"measurements.score.cls": "metrics_distributions",
"measurements.score.ttfb": "metrics_distributions",
"measurements.score.inp": "metrics_distributions",
"measurements.score.total": "metrics_distributions",
"measurements.score.weight.lcp": "metrics_distributions",
"measurements.score.weight.fcp": "metrics_distributions",
"measurements.score.weight.fid": "metrics_distributions",
"measurements.score.weight.cls": "metrics_distributions",
"measurements.score.weight.ttfb": "metrics_distributions",
"measurements.score.weight.inp": "metrics_distributions",
"measurements.app_start_cold": "metrics_distributions",
"measurements.app_start_warm": "metrics_distributions",
"spans.http": "metrics_distributions",
"user": "metrics_sets",
"function.duration": "metrics_distributions",
"measurements.inp": "metrics_distributions",
"messaging.message.receive.latency": "metrics_gauges",
}
ON_DEMAND_KEY_MAP = {
"c": TransactionMetricKey.COUNT_ON_DEMAND.value,
"d": TransactionMetricKey.DIST_ON_DEMAND.value,
"s": TransactionMetricKey.SET_ON_DEMAND.value,
}
ON_DEMAND_MRI_MAP = {
"c": TransactionMRI.COUNT_ON_DEMAND.value,
"d": TransactionMRI.DIST_ON_DEMAND.value,
"s": TransactionMRI.SET_ON_DEMAND.value,
}
ON_DEMAND_ENTITY_MAP = {
"c": EntityKey.MetricsCounters.value,
"d": EntityKey.MetricsDistributions.value,
"s": EntityKey.MetricsSets.value,
}
METRIC_STRINGS: list[str] = []
DEFAULT_METRIC_TIMESTAMP = datetime(2015, 1, 1, 10, 15, 0, tzinfo=UTC)
def setUp(self):
super().setUp()
self.min_ago = before_now(minutes=1)
self.two_min_ago = before_now(minutes=2)
self.login_as(user=self.user)
self._index_metric_strings()
def do_request(self, data: dict[str, Any], features: dict[str, bool] | None = None) -> Response:
"""Set up self.features and self.url in the inheriting classes.
You can pass your own features if you do not want to use the default used by the subclass.
"""
with self.feature(features or self.features):
ret = self.client.get(self.url, data=data, format="json")
assert is_drf_response(ret)
return ret
def _index_metric_strings(self):
strings = [
"transaction",
"environment",
"http.status",
"transaction.status",
METRIC_TOLERATED_TAG_VALUE,
METRIC_SATISFIED_TAG_VALUE,
METRIC_FRUSTRATED_TAG_VALUE,
METRIC_SATISFACTION_TAG_KEY,
*self.METRIC_STRINGS,
*list(SPAN_STATUS_NAME_TO_CODE.keys()),
*list(METRICS_MAP.values()),
]
org_strings = {self.organization.id: set(strings)}
indexer.bulk_record({UseCaseID.TRANSACTIONS: org_strings})
def store_transaction_metric(
self,
value: list[Any] | Any,
metric: str = "transaction.duration",
internal_metric: str | None = None,
entity: str | None = None,
tags: dict[str, str] | None = None,
timestamp: datetime | None = None,
project: int | None = None,
use_case_id: UseCaseID = UseCaseID.TRANSACTIONS,
aggregation_option: AggregationOption | None = None,
) -> None:
internal_metric = METRICS_MAP[metric] if internal_metric is None else internal_metric
entity = self.ENTITY_MAP[metric] if entity is None else entity
org_id = self.organization.id
if tags is None:
tags = {}
if timestamp is None:
metric_timestamp = self.DEFAULT_METRIC_TIMESTAMP.timestamp()
else:
metric_timestamp = timestamp.timestamp()
if project is None:
project = self.project.id
if not isinstance(value, list):
value = [value]
for subvalue in value:
self.store_metric(
org_id,
project,
internal_metric,
tags,
int(metric_timestamp),
subvalue,
aggregation_option=aggregation_option,
)
def store_on_demand_metric(
self,
value: int | float | str,
spec: OnDemandMetricSpec,
additional_tags: dict[str, str] | None = None,
timestamp: datetime | None = None,
) -> None:
"""Convert on-demand metric and store it.
For sets, value needs to be a unique identifier while for counters it is a count."""
relay_metric_spec = spec.to_metric_spec(self.project)
metric_spec_tags = relay_metric_spec["tags"] or [] if relay_metric_spec else []
tags = {i["key"]: i.get("value") or i.get("field") for i in metric_spec_tags}
metric_type = spec.metric_type
if additional_tags:
# Additional tags might be needed to override field values from the spec.
tags.update(additional_tags)
self.store_transaction_metric(
value,
metric=self.ON_DEMAND_KEY_MAP[metric_type],
internal_metric=self.ON_DEMAND_MRI_MAP[metric_type],
entity=self.ON_DEMAND_ENTITY_MAP[metric_type],
tags=tags,
timestamp=timestamp,
)
def store_span_metric(
self,
value: dict[str, int] | list[int] | list[dict[str, int]] | int,
metric: str = "span.self_time",
internal_metric: str | None = None,
entity: str | None = None,
tags: dict[str, str] | None = None,
timestamp: datetime | None = None,
project: int | None = None,
use_case_id: UseCaseID = UseCaseID.SPANS,
):
internal_metric = SPAN_METRICS_MAP[metric] if internal_metric is None else internal_metric
org_id = self.organization.id
if tags is None:
tags = {}
if timestamp is None:
metric_timestamp = self.DEFAULT_METRIC_TIMESTAMP.timestamp()
else:
metric_timestamp = timestamp.timestamp()
if project is None:
project = self.project.id
if not isinstance(value, list):
value = [value]
for subvalue in value:
self.store_metric(
org_id,
project,
internal_metric,
tags,
int(metric_timestamp),
subvalue,
)
def wait_for_metric_count(
self,
project,
total,
metric="transaction.duration",
mri=TransactionMRI.DURATION.value,
attempts=2,
):
attempt = 0
metrics_query = self.build_metrics_query(
before_now="1d",
granularity="1d",
select=[
MetricField(
op="count",
metric_mri=mri,
),
],
include_series=False,
)
while attempt < attempts:
data = get_series(
[project],
metrics_query=metrics_query,
use_case_id=UseCaseID.TRANSACTIONS,
)
count = data["groups"][0]["totals"][f"count({metric})"]
if count >= total:
break
attempt += 1
time.sleep(0.05)
if attempt == attempts:
assert (
False
), f"Could not ensure that {total} metric(s) were persisted within {attempt} attempt(s)."
| MetricsEnhancedPerformanceTestCase |
python | django__django | tests/servers/test_basehttp.py | {
"start": 687,
"end": 8044
} | class ____(SimpleTestCase):
request_factory = RequestFactory()
def test_log_message(self):
request = WSGIRequest(self.request_factory.get("/").environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, "192.168.0.2", None)
level_status_codes = {
"info": [200, 301, 304],
"warning": [400, 403, 404],
"error": [500, 503],
}
for level, status_codes in level_status_codes.items():
for status_code in status_codes:
# The correct level gets the message.
with self.assertLogs("django.server", level.upper()) as cm:
handler.log_message("GET %s %s", "A", str(status_code))
self.assertIn("GET A %d" % status_code, cm.output[0])
# Incorrect levels don't have any messages.
for wrong_level in level_status_codes:
if wrong_level != level:
with self.assertLogs("django.server", "INFO") as cm:
handler.log_message("GET %s %s", "A", str(status_code))
self.assertNotEqual(
cm.records[0].levelname, wrong_level.upper()
)
def test_log_message_escapes_control_sequences(self):
request = WSGIRequest(self.request_factory.get("/").environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, "192.168.0.2", None)
malicious_path = "\x1b[31mALERT\x1b[0m"
with self.assertLogs("django.server", "WARNING") as cm:
handler.log_message("GET %s %s", malicious_path, "404")
log = cm.output[0]
self.assertNotIn("\x1b[31m", log)
self.assertIn("\\x1b[31mALERT\\x1b[0m", log)
def test_https(self):
request = WSGIRequest(self.request_factory.get("/").environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, "192.168.0.2", None)
with self.assertLogs("django.server", "ERROR") as cm:
handler.log_message("GET %s %s", "\x16\x03", "4")
self.assertEqual(
"You're accessing the development server over HTTPS, "
"but it only supports HTTP.",
cm.records[0].getMessage(),
)
def test_strips_underscore_headers(self):
"""WSGIRequestHandler ignores headers containing underscores.
This follows the lead of nginx and Apache 2.4, and is to avoid
ambiguity between dashes and underscores in mapping to WSGI environ,
which can have security implications.
"""
def test_app(environ, start_response):
"""A WSGI app that just reflects its HTTP environ."""
start_response("200 OK", [])
http_environ_items = sorted(
"%s:%s" % (k, v) for k, v in environ.items() if k.startswith("HTTP_")
)
yield (",".join(http_environ_items)).encode()
rfile = BytesIO()
rfile.write(b"GET / HTTP/1.0\r\n")
rfile.write(b"Some-Header: good\r\n")
rfile.write(b"Some_Header: bad\r\n")
rfile.write(b"Other_Header: bad\r\n")
rfile.seek(0)
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == "rb":
return rfile
elif mode == "wb":
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# Prevent logging from appearing in test output.
with self.assertLogs("django.server", "INFO"):
# instantiating a handler runs the request as side effect
WSGIRequestHandler(request, "192.168.0.2", server)
wfile.seek(0)
body = list(wfile.readlines())[-1]
self.assertEqual(body, b"HTTP_SOME_HEADER:good")
def test_no_body_returned_for_head_requests(self):
hello_world_body = b"<!DOCTYPE html><html><body>Hello World</body></html>"
content_length = len(hello_world_body)
def test_app(environ, start_response):
"""A WSGI app that returns a hello world."""
start_response("200 OK", [])
return [hello_world_body]
rfile = BytesIO(b"GET / HTTP/1.0\r\n")
rfile.seek(0)
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == "rb":
return rfile
elif mode == "wb":
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# Prevent logging from appearing in test output.
with self.assertLogs("django.server", "INFO"):
# Instantiating a handler runs the request as side effect.
WSGIRequestHandler(request, "192.168.0.2", server)
wfile.seek(0)
lines = list(wfile.readlines())
body = lines[-1]
# The body is returned in a GET response.
self.assertEqual(body, hello_world_body)
self.assertIn(f"Content-Length: {content_length}\r\n".encode(), lines)
self.assertNotIn(b"Connection: close\r\n", lines)
rfile = BytesIO(b"HEAD / HTTP/1.0\r\n")
rfile.seek(0)
wfile = UnclosableBytesIO()
with self.assertLogs("django.server", "INFO"):
WSGIRequestHandler(request, "192.168.0.2", server)
wfile.seek(0)
lines = list(wfile.readlines())
body = lines[-1]
# The body is not returned in a HEAD response.
self.assertEqual(body, b"\r\n")
self.assertIs(
any([line.startswith(b"Content-Length:") for line in lines]), False
)
self.assertNotIn(b"Connection: close\r\n", lines)
def test_non_zero_content_length_set_head_request(self):
hello_world_body = b"<!DOCTYPE html><html><body>Hello World</body></html>"
content_length = len(hello_world_body)
def test_app(environ, start_response):
"""
A WSGI app that returns a hello world with non-zero Content-Length.
"""
start_response("200 OK", [("Content-length", str(content_length))])
return [hello_world_body]
rfile = BytesIO(b"HEAD / HTTP/1.0\r\n")
rfile.seek(0)
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == "rb":
return rfile
elif mode == "wb":
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# Prevent logging from appearing in test output.
with self.assertLogs("django.server", "INFO"):
# Instantiating a handler runs the request as side effect.
WSGIRequestHandler(request, "192.168.0.2", server)
wfile.seek(0)
lines = list(wfile.readlines())
body = lines[-1]
# The body is not returned in a HEAD response.
self.assertEqual(body, b"\r\n")
# Non-zero Content-Length is not removed.
self.assertEqual(lines[-2], f"Content-length: {content_length}\r\n".encode())
self.assertNotIn(b"Connection: close\r\n", lines)
| WSGIRequestHandlerTestCase |
python | doocs__leetcode | solution/1500-1599/1562.Find Latest Group of Size M/Solution.py | {
"start": 0,
"end": 885
} | class ____:
def findLatestStep(self, arr: List[int], m: int) -> int:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
def union(a, b):
pa, pb = find(a), find(b)
if pa == pb:
return
p[pa] = pb
size[pb] += size[pa]
n = len(arr)
if m == n:
return n
vis = [False] * n
p = list(range(n))
size = [1] * n
ans = -1
for i, v in enumerate(arr):
v -= 1
if v and vis[v - 1]:
if size[find(v - 1)] == m:
ans = i
union(v, v - 1)
if v < n - 1 and vis[v + 1]:
if size[find(v + 1)] == m:
ans = i
union(v, v + 1)
vis[v] = True
return ans
| Solution |
python | celery__celery | t/unit/app/test_loaders.py | {
"start": 7440,
"end": 7744
} | class ____:
def setup_method(self):
self.loader = AppLoader(app=self.app)
def test_on_worker_init(self):
self.app.conf.imports = ('subprocess',)
sys.modules.pop('subprocess', None)
self.loader.init_worker()
assert 'subprocess' in sys.modules
| test_AppLoader |
python | ray-project__ray | python/ray/tests/test_state_api.py | {
"start": 64748,
"end": 119477
} | class ____:
def test_list_get_actors(self, class_ray_instance):
@ray.remote
class A:
pass
@ray.remote(num_gpus=1)
class UnschedulableActor:
pass
job_id = ray.get_runtime_context().get_job_id()
node_id = ray.get_runtime_context().get_node_id()
a = A.remote()
b = UnschedulableActor.remote()
def verify():
# Test list
actors = list_actors(filters=[("actor_id", "=", a._actor_id.hex())])
assert len(actors) == 1
assert actors[0]["state"] == "ALIVE"
assert is_hex(actors[0]["actor_id"])
assert a._actor_id.hex() == actors[0]["actor_id"]
assert actors[0]["job_id"] == job_id
assert actors[0]["node_id"] == node_id
# Test the second actor's node id is None because
# it is not scheduled.
actors = list_actors(filters=[("actor_id", "=", b._actor_id.hex())])
assert actors[0]["node_id"] is None
# Test get
actors = list_actors(detail=True)
for actor in actors:
get_actor_data = get_actor(actor["actor_id"])
assert get_actor_data is not None
assert get_actor_data == actor
return True
wait_for_condition(verify)
print(list_actors())
def test_list_actors_namespace(self, class_ray_instance):
"""Check that list_actors returns namespaces."""
@ray.remote
class A:
pass
A.options(namespace="x").remote()
A.options(namespace="y").remote()
actors = list_actors()
namespaces = Counter([actor["ray_namespace"] for actor in actors])
assert namespaces["x"] == 1
assert namespaces["y"] == 1
# Check that we can filter by namespace
x_actors = list_actors(filters=[("ray_namespace", "=", "x")])
assert len(x_actors) == 1
assert x_actors[0]["ray_namespace"] == "x"
@pytest.mark.skipif(
sys.platform == "win32",
reason="Failed on Windows",
)
@pytest.mark.parametrize(
"override_url",
[
"https://external_dashboard_url",
"https://external_dashboard_url/path1/?query_param1=val1&query_param2=val2",
"new_external_dashboard_url",
],
)
def test_state_api_with_external_dashboard_override(
shutdown_only, override_url, monkeypatch
):
with monkeypatch.context() as m:
if override_url:
m.setenv(
ray_constants.RAY_OVERRIDE_DASHBOARD_URL,
override_url,
)
ray.init()
@ray.remote
class A:
pass
a = A.remote() # noqa
def verify():
# Test list
actors = list_actors()
assert len(actors) == 1
assert actors[0]["state"] == "ALIVE"
assert is_hex(actors[0]["actor_id"])
assert a._actor_id.hex() == actors[0]["actor_id"]
# Test get
actors = list_actors(detail=True)
for actor in actors:
get_actor_data = get_actor(actor["actor_id"])
assert get_actor_data is not None
assert get_actor_data == actor
return True
wait_for_condition(verify)
print(list_actors())
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_list_get_pgs(shutdown_only):
    """list_placement_groups and get_placement_group agree for a created PG."""
    ray.init()
    pg = ray.util.placement_group(bundles=[{"CPU": 1}])  # noqa

    def verify():
        # Test list
        pgs = list_placement_groups()
        assert len(pgs) == 1
        assert pgs[0]["state"] == "CREATED"
        assert is_hex(pgs[0]["placement_group_id"])
        assert pg.id.hex() == pgs[0]["placement_group_id"]

        # Test get: detail output must round-trip through get_placement_group.
        pgs = list_placement_groups(detail=True)
        for pg_data in pgs:
            get_pg_data = get_placement_group(pg_data["placement_group_id"])
            assert get_pg_data is not None
            assert pg_data == get_pg_data

        return True

    wait_for_condition(verify)
    print(list_placement_groups())
@pytest.mark.asyncio
async def test_cloud_envs(ray_start_cluster, monkeypatch):
    """Cloud env vars propagate into GetAllNodeInfo.

    The worker node is started with RAY_CLOUD_INSTANCE_ID and
    RAY_NODE_TYPE_NAME set; its node info must report them, while the head
    node (started without the env vars) reports empty strings.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1, node_name="head_node")
    ray.init(address=cluster.address)

    with monkeypatch.context() as m:
        m.setenv(
            "RAY_CLOUD_INSTANCE_ID",
            "test_cloud_id",
        )
        m.setenv("RAY_NODE_TYPE_NAME", "test-node-type")
        # The env vars only matter while the worker node process is spawned.
        cluster.add_node(
            num_cpus=1,
            node_name="worker_node",
            dashboard_agent_listen_port=find_free_port(),
        )

    client = state_source_client(cluster.address)

    async def verify():
        reply = await client.get_all_node_info()
        print(reply)
        assert len(reply.node_info_list) == 2
        for node_info in reply.node_info_list:
            if node_info.node_name == "worker_node":
                assert node_info.instance_id == "test_cloud_id"
                assert node_info.node_type_name == "test-node-type"
            else:
                assert node_info.instance_id == ""
                assert node_info.node_type_name == ""
        return True

    await async_wait_for_condition(verify)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_list_get_nodes(ray_start_cluster):
    """list_nodes/get_node report liveness, labels, and the head-node flag.

    A worker node is added and then removed, so the listing contains one
    ALIVE head node and one DEAD worker node.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1, node_name="head_node")
    ray.init(address=cluster.address)
    worker_node = cluster.add_node(
        num_cpus=1,
        node_name="worker_node",
        dashboard_agent_listen_port=find_free_port(),
    )
    cluster.remove_node(worker_node)

    def verify():
        nodes = list_nodes(detail=True)
        for node in nodes:
            assert is_hex(node["node_id"])
            # Every node carries the auto-populated node-id label.
            assert node["labels"] == {"ray.io/node-id": node["node_id"]}
            if node["node_name"] == "head_node":
                assert node["is_head_node"]
                assert node["state"] == "ALIVE"
                assert node["state_message"] is None
            else:
                assert not node["is_head_node"]
                assert node["state"] == "DEAD"
                assert node["state_message"] == "Expected termination: received SIGTERM"

        # Check with legacy API
        check_nodes = ray.nodes()
        assert len(check_nodes) == len(nodes)

        # Sort both listings by node id so entries can be compared pairwise.
        check_nodes = sorted(check_nodes, key=lambda n: n["NodeID"])
        nodes = sorted(nodes, key=lambda n: n["node_id"])

        for check_node, node in zip(check_nodes, nodes):
            assert check_node["NodeID"] == node["node_id"]
            assert check_node["NodeName"] == node["node_name"]

        # Check the Get api
        nodes = list_nodes(detail=True)
        for node in nodes:
            get_node_data = get_node(node["node_id"])
            assert get_node_data == node

        return True

    wait_for_condition(verify)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_list_get_jobs(shutdown_only):
    """list_jobs/get_job cover both submission jobs and driver jobs."""
    ray.init()

    # Test submission job
    client = JobSubmissionClient(
        f"http://{ray._private.worker.global_worker.node.address_info['webui_url']}"
    )
    job_id = client.submit_job(  # noqa
        # Entrypoint shell command to execute
        entrypoint="ls",
    )

    def verify():
        job_data = list_jobs(detail=True)[0]
        print(job_data)
        job_id_from_api = job_data["submission_id"]
        assert job_data["status"] == "SUCCEEDED"
        assert job_id == job_id_from_api
        assert job_data["start_time"] > 0
        assert job_data["end_time"] > 0
        return True

    wait_for_condition(verify)

    # Test driver jobs: run a second driver as an external script.
    script = """
import ray

ray.init("auto")

@ray.remote
def f():
    pass

ray.get(f.remote())
"""
    run_string_as_driver(script)

    def verify():
        jobs = list_jobs(filters=[("type", "=", "DRIVER")], detail=True)
        assert len(jobs) == 2, "1 test driver + 1 script run above"
        for driver_job in jobs:
            assert driver_job["driver_info"] is not None
            assert driver_job["start_time"] > 0
        sub_jobs = list_jobs(filters=[("type", "=", "SUBMISSION")])
        assert len(sub_jobs) == 1
        assert sub_jobs[0]["submission_id"] is not None
        return True

    wait_for_condition(verify)

    # Test GET api
    def verify():
        job = get_job(id=job_id)
        assert job["submission_id"] == job_id
        assert job["entrypoint"] == "ls"
        assert job["status"] == "SUCCEEDED"
        return True

    wait_for_condition(verify)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_list_get_workers(shutdown_only):
    """list_workers/get_worker list live workers and record end time on death."""
    ray.init()

    def verify():
        workers = list_workers(detail=True)
        assert is_hex(workers[0]["worker_id"])
        # +1 to take into account of drivers.
        assert len(workers) == ray.cluster_resources()["CPU"] + 1
        # End time should be 0 as it is not configured yet.
        assert workers[0]["end_time_ms"] == 0

        # Test get worker returns the same result
        workers = list_workers(detail=True)
        for worker in workers:
            got_worker = get_worker(worker["worker_id"])
            assert got_worker == worker

        return True

    wait_for_condition(verify)

    # Kill the worker
    workers = list_workers()
    os.kill(workers[-1]["pid"], signal.SIGKILL)

    def verify():
        workers = list_workers(detail=True, filters=[("is_alive", "=", "False")])
        assert len(workers) == 1
        # A dead worker must have a non-zero end time recorded.
        assert workers[0]["end_time_ms"] != 0
        return True

    wait_for_condition(verify)
    print(list_workers(detail=True))
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_list_cluster_events(shutdown_only):
    """An infeasible resource request surfaces as a cluster event."""
    ray.init()

    # Request a GPU on a GPU-less cluster so the request can never be
    # fulfilled and an error event is emitted.
    @ray.remote(num_gpus=1)
    def f():
        pass

    f.remote()

    def verify():
        events = list_cluster_events()
        print(events)
        assert len(events) == 1
        assert (
            "Error: No available node types can fulfill resource request"
        ) in events[0]["message"]
        return True

    wait_for_condition(verify)
    print(list_cluster_events())


# TODO(sang): Support get_cluster_events
def test_list_get_tasks(shutdown_only):
    """list_tasks/get_task: states, per-state counts, node ids, and filters.

    Tasks are pinned in distinct scheduling states (long sleeps, a pending
    dependency, and a task needing an unavailable GPU) so the listing shows
    a known mix of RUNNING / PENDING_* states.
    """
    ray.init(num_cpus=2)
    job_id = ray.get_runtime_context().get_job_id()
    node_id = ray.get_runtime_context().get_node_id()

    @ray.remote
    def f():
        import time

        time.sleep(30)

    @ray.remote
    def g(dep):
        import time

        time.sleep(30)

    # Requires a GPU on a GPU-less cluster: never scheduled.
    @ray.remote(num_gpus=1)
    def impossible():
        pass

    f_refs = [f.options(name=f"f_{i}").remote() for i in range(2)]  # noqa
    g_ref = g.remote(f.remote())  # noqa
    im_ref = impossible.remote()  # noqa

    def verify_task_from_objectref(task, job_id, tasks):
        # The task resolved from an ObjectRef must belong to this job and
        # appear in the plain listing.
        assert task["job_id"] == job_id
        assert task["actor_id"] is None
        assert any(task["task_id"] == t["task_id"] for t in tasks)

    def verify():
        tasks = list_tasks()
        assert len(tasks) == 5
        for task in tasks:
            assert task["job_id"] == job_id
        for task in tasks:
            assert task["actor_id"] is None

        # Test get_task by objectRef
        for ref in f_refs:
            verify_task_from_objectref(get_task(ref), job_id, tasks)
        verify_task_from_objectref(get_task(g_ref), job_id, tasks)
        verify_task_from_objectref(get_task(im_ref), job_id, tasks)

        # Count tasks per scheduling state.
        waiting_for_execution = len(
            list(
                filter(
                    lambda task: task["state"] == "SUBMITTED_TO_WORKER",
                    tasks,
                )
            )
        )
        assert waiting_for_execution == 0
        scheduled = len(
            list(
                filter(
                    lambda task: task["state"] == "PENDING_NODE_ASSIGNMENT",
                    tasks,
                )
            )
        )
        assert scheduled == 2
        waiting_for_dep = len(
            list(
                filter(
                    lambda task: task["state"] == "PENDING_ARGS_AVAIL",
                    tasks,
                )
            )
        )
        assert waiting_for_dep == 1
        running = len(
            list(
                filter(
                    lambda task: task["state"] == "RUNNING",
                    tasks,
                )
            )
        )
        assert running == 2

        # Test get tasks
        tasks = list_tasks(detail=True)
        for task in tasks:
            get_task_data = get_task(task["task_id"])
            assert get_task_data == task

        # Test node id: only running tasks have been assigned a node.
        tasks = list_tasks(filters=[("state", "=", "PENDING_NODE_ASSIGNMENT")])
        for task in tasks:
            assert task["node_id"] is None

        tasks = list_tasks(filters=[("state", "=", "RUNNING")])
        for task in tasks:
            assert task["node_id"] == node_id

        tasks = list_tasks(filters=[("job_id", "=", job_id)])
        for task in tasks:
            assert task["job_id"] == job_id

        tasks = list_tasks(filters=[("name", "=", "f_0")], limit=1)
        assert len(tasks) == 1

        # using limit to make sure state filtering is done on the gcs side
        tasks = list_tasks(filters=[("STATE", "=", "PENDING_ARGS_AVAIL")], limit=1)
        assert len(tasks) == 1

        return True

    wait_for_condition(verify)
    print(list_tasks())
def test_list_get_tasks_call_site(shutdown_only):
    """
    Call chain: Driver -> caller -> callee.

    Verify that the call site is captured in callee, and it contains string
    "caller".
    """
    # Call-site recording is opt-in; enable it via the runtime env var.
    ray.init(
        num_cpus=2,
        runtime_env={"env_vars": {"RAY_record_task_actor_creation_sites": "true"}},
    )

    @ray.remote
    def callee():
        import time

        time.sleep(30)

    @ray.remote
    def caller():
        return callee.remote()

    caller_ref = caller.remote()
    callee_ref = ray.get(caller_ref)

    def verify():
        callee_task = get_task(callee_ref)
        assert callee_task["call_site"] is not None
        assert "caller" in callee_task["call_site"]
        return True

    wait_for_condition(verify)
    print(list_tasks())
def test_list_actor_tasks_call_site(shutdown_only):
    """
    Call chain: Driver -> create_actor -> (Actor, Actor.method).

    Verify that the call sites are captured in both Actor and Actor.method,
    and they contain string "create_actor".
    """
    # Call-site recording is opt-in; enable it via the runtime env var.
    ray.init(
        num_cpus=2,
        runtime_env={"env_vars": {"RAY_record_task_actor_creation_sites": "true"}},
    )

    @ray.remote
    class Actor:
        def method(self):
            import time

            time.sleep(30)

    @ray.remote
    def create_actor():
        a = Actor.remote()
        m_ref = a.method.remote()
        return a, m_ref

    actor_ref, method_ref = ray.get(create_actor.remote())

    def verify():
        method_task = get_task(method_ref)
        assert method_task["call_site"] is not None
        assert "create_actor" in method_task["call_site"]

        actors = list_actors(detail=True)
        assert len(actors) == 1
        actor = actors[0]
        assert actor["call_site"] is not None
        assert "create_actor" in actor["call_site"]
        return True

    wait_for_condition(verify)
    print(list_tasks())
def test_list_get_tasks_label_selector(ray_start_cluster):
    """
    Verify that a task's ``label_selector`` is recorded and returned by
    ``get_task``. The cluster node carries matching labels so the task can
    actually be scheduled.

    (The original docstring here was copy-pasted from the call-site test.)
    """
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=2, labels={"ray.io/accelerator-type": "A100", "region": "us-west4"}
    )
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(label_selector={"region": "us-west4"})
    def foo():
        import time

        time.sleep(5)

    call_ref = foo.remote()
    ray.get(call_ref)

    def verify():
        task = get_task(call_ref)
        assert task["label_selector"] == {"region": "us-west4"}
        return True

    wait_for_condition(verify)
    print(list_tasks())
def test_list_actor_tasks_label_selector(ray_start_cluster):
    """
    Verify that an actor's ``label_selector`` is recorded and returned by
    ``list_actors(detail=True)``. The cluster node carries a matching label
    so the actor can actually be scheduled.

    (The original docstring here was copy-pasted from the call-site test.)
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2, labels={"region": "us-west4"})
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(label_selector={"region": "us-west4"})
    class Actor:
        def method(self):
            import time

            time.sleep(5)

    actor = Actor.remote()
    ray.get(actor.method.remote())

    def verify():
        actors = list_actors(detail=True)
        assert len(actors) == 1
        actor = actors[0]
        assert actor["label_selector"] == {"region": "us-west4"}
        return True

    wait_for_condition(verify)
    print(list_actors(detail=True))
def test_pg_worker_id_tasks(shutdown_only):
    """Tasks/actors scheduled into a placement group report pg and worker ids."""
    ray.init(num_cpus=1)
    pg = ray.util.placement_group(bundles=[{"CPU": 1}])
    pg.wait()

    @ray.remote
    def f():
        pass

    @ray.remote
    class A:
        def ready(self):
            return os.getpid()

    # Run a task inside the placement group.
    ray.get(
        f.options(
            scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
        ).remote()
    )

    def verify():
        tasks = list_tasks(detail=True)
        workers = list_workers(filters=[("worker_type", "=", "WORKER")])
        assert len(tasks) == 1
        assert len(workers) == 1

        # The task record must point at the pg and the worker that ran it.
        assert tasks[0]["placement_group_id"] == pg.id.hex()
        assert tasks[0]["worker_id"] == workers[0]["worker_id"]
        assert tasks[0]["worker_pid"] == workers[0]["pid"]
        return True

    wait_for_condition(verify)
    print(list_tasks(detail=True))

    # Now run an actor inside the same placement group.
    a = A.options(
        scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
    ).remote()
    pid = ray.get(a.ready.remote())

    def verify():
        actors = list_actors(detail=True)
        workers = list_workers(detail=True, filters=[("pid", "=", pid)])
        assert len(actors) == 1
        assert len(workers) == 1

        assert actors[0]["placement_group_id"] == pg.id.hex()
        return True

    wait_for_condition(verify)
    print(list_actors(detail=True))
def test_parent_task_id(shutdown_only):
    """Test parent task id set up properly.

    The child's ``parent_task_id`` must equal the parent's ``task_id``.
    """
    ray.init(num_cpus=2)

    @ray.remote
    def child():
        pass

    @ray.remote
    def parent():
        ray.get(child.remote())

    ray.get(parent.remote())

    def verify():
        tasks = list_tasks(detail=True)
        assert len(tasks) == 2, "Expect 2 tasks to finished"
        parent_task_id = None
        child_parent_task_id = None
        for task in tasks:
            if task["func_or_class_name"] == "parent":
                parent_task_id = task["task_id"]
            elif task["func_or_class_name"] == "child":
                child_parent_task_id = task["parent_task_id"]
        assert (
            parent_task_id == child_parent_task_id
        ), "Child should have the parent task id"
        return True

    wait_for_condition(verify)
def test_list_get_task_multiple_attempt_all_failed(shutdown_only):
    """Every attempt of a task that always fails is reported (initial + retries)."""
    ray.init(num_cpus=2)
    job_id = ray.get_runtime_context().get_job_id()
    node_id = ray.get_runtime_context().get_node_id()

    @ray.remote(retry_exceptions=True, max_retries=2)
    def f():
        raise ValueError("f is expected to failed")

    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(f.remote())

    def verify(task_attempts):
        assert len(task_attempts) == 3  # 2 retries + 1 initial run
        for task_attempt in task_attempts:
            assert task_attempt["job_id"] == job_id
            assert task_attempt["state"] == "FAILED"
            assert task_attempt["node_id"] == node_id

        assert {task_attempt["attempt_number"] for task_attempt in task_attempts} == {
            0,
            1,
            2,
        }, "Attempt number should be 0,1,2"

        assert (
            len({task_attempt["task_id"] for task_attempt in task_attempts}) == 1
        ), "Same task id"

        return True

    wait_for_condition(lambda: verify(list_tasks()))

    # Test get with task id returns multiple task attempts
    task_id = list_tasks()[0]["task_id"]
    wait_for_condition(lambda: verify(get_task(task_id)))
def test_list_get_task_multiple_attempt_finished_after_retry(shutdown_only):
    """Task attempts are listed for a task that succeeds after retries.

    The task fails twice (the Phaser actor raises on the first two calls)
    and succeeds on the third attempt, so three attempts must be reported:
    two FAILED followed by one FINISHED.
    """
    ray.init(num_cpus=2)

    # Test success after retries.
    @ray.remote
    class Phaser:
        def __init__(self):
            self.i = 0

        def inc(self):
            self.i += 1
            if self.i < 3:
                raise ValueError(
                    f"First two tries are expected to fail (try={self.i})."
                )

    phaser = Phaser.remote()

    @ray.remote(retry_exceptions=True, max_retries=3)
    def f():
        ray.get(phaser.inc.remote())

    ray.get(f.remote())

    def verify(task_attempts):
        assert len(task_attempts) == 3
        for task_attempt in task_attempts[1:]:
            assert task_attempt["state"] == "FAILED"
        # BUG FIX: this comparison previously had no `assert`, making it a
        # no-op that never checked the final attempt's state.
        assert task_attempts[0]["state"] == "FINISHED"
        assert {task_attempt["attempt_number"] for task_attempt in task_attempts} == {
            0,
            1,
            2,
        }, "Attempt number should be 0,1,2"
        return True

    wait_for_condition(lambda: verify(list_tasks(filters=[("name", "=", "f")])))
def test_list_actor_tasks(shutdown_only):
    """Actor tasks show up in list_tasks with the actor's id and the
    expected mix of scheduling states, and the actor_id filter works."""
    ray.init(num_cpus=2)
    job_id = ray.get_runtime_context().get_job_id()

    @ray.remote(max_concurrency=2)
    class Actor:
        def call(self):
            import time

            time.sleep(30)

    a = Actor.remote()
    actor_id = a._actor_id.hex()
    calls = [a.call.remote() for _ in range(10)]  # noqa

    def verify():
        tasks = list_tasks()
        for task in tasks:
            assert task["job_id"] == job_id
            assert task["actor_id"] == actor_id
        # Actor.__init__: 1 finished
        # Actor.call: 2 running, 8 waiting for execution (queued).
        assert len(tasks) == 11

        def count_state(state):
            # Number of listed tasks currently in the given state.
            return sum(1 for task in tasks if task["state"] == state)

        assert count_state("PENDING_ACTOR_TASK_ORDERING_OR_CONCURRENCY") == 8
        assert count_state("PENDING_NODE_ASSIGNMENT") == 0
        assert count_state("PENDING_ARGS_AVAIL") == 0
        assert count_state("RUNNING") == 2

        # Filters with actor id.
        assert len(list_tasks(filters=[("actor_id", "=", actor_id)])) == 11
        assert len(list_tasks(filters=[("actor_id", "!=", actor_id)])) == 0
        return True

    wait_for_condition(verify)
    print(list_tasks())
def test_list_get_objects(shutdown_only):
    """list_objects/get_objects report a plasma-stored object by its id."""
    ray.init()
    import numpy as np

    # 50 MiB payload — presumably large enough to be stored in plasma
    # rather than inlined (the variable name suggests so).
    data = np.ones(50 * 1024 * 1024, dtype=np.uint8)
    plasma_obj = ray.put(data)

    @ray.remote
    def f(obj):
        print(obj)

    ray.get(f.remote(plasma_obj))

    def verify():
        obj = list_objects()[0]
        # For detailed output, the test is covered from `test_memstat.py`
        assert obj["object_id"] == plasma_obj.hex()

        obj = list_objects(detail=True)[0]
        got_objs = get_objects(plasma_obj.hex())
        assert len(got_objs) == 1
        assert obj == got_objs[0]

        return True

    wait_for_condition(verify)
    print(list_objects())
@pytest.mark.skipif(
    sys.platform == "win32", reason="Runtime env not working in Windows."
)
def test_list_runtime_envs(shutdown_only):
    """list_runtime_envs reports success/failure state and reference counts."""
    ray.init(runtime_env={"pip": ["requests"]})

    @ray.remote
    class Actor:
        def ready(self):
            pass

    a = Actor.remote()  # noqa
    # `b` uses a runtime env whose setup is expected to fail.
    b = Actor.options(runtime_env={"pip": ["nonexistent_dep"]}).remote()  # noqa
    ray.get(a.ready.remote())

    with pytest.raises(ray.exceptions.RuntimeEnvSetupError):
        ray.get(b.ready.remote())

    def verify():
        result = list_runtime_envs(detail=True)
        assert len(result) == 2

        failed_runtime_env = result[0]
        assert (
            not failed_runtime_env["success"]
            and failed_runtime_env["error"]
            and failed_runtime_env["ref_cnt"] == 0
        )

        successful_runtime_env = result[1]
        # ref_cnt == 2 — presumably the job-level env plus actor `a`; verify.
        assert (
            successful_runtime_env["success"] and successful_runtime_env["ref_cnt"] == 2
        )
        return True

    wait_for_condition(verify)
def test_limit(shutdown_only):
    """`list_actors(limit=...)` truncates deterministically."""
    ray.init()

    @ray.remote
    class A:
        def ready(self):
            pass

    handles = [A.remote() for _ in range(4)]
    ray.get([handle.ready.remote() for handle in handles])

    first_page = list_actors(limit=2)
    assert len(first_page) == 2
    # Repeating the call must yield the exact same (deterministic) output.
    assert first_page == list_actors(limit=2)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Failed on Windows",
)
def test_network_failure(shutdown_only):
    """When the request fails due to network failure,
    verifies it raises an exception."""
    ray.init()

    @ray.remote
    def f():
        import time

        time.sleep(30)

    a = [f.remote() for _ in range(4)]  # noqa
    wait_for_condition(lambda: len(list_tasks()) == 4)

    # Kill raylet so that list_tasks will have network error on querying raylets.
    ray._private.worker._global_node.kill_raylet()

    with pytest.raises(ConnectionError):
        list_tasks(_explain=True)
def test_network_partial_failures(monkeypatch, ray_start_cluster):
    """When the request fails due to network failure,
    verifies it prints proper warning."""
    with monkeypatch.context() as m:
        # defer for 5s for the second node.
        # This will help the API not return until the node is killed.
        m.setenv(
            "RAY_testing_asio_delay_us",
            "NodeManagerService.grpc_server.GetObjectsInfo=5000000:5000000",
        )
        m.setenv("RAY_record_ref_creation_sites", "1")
        cluster = ray_start_cluster
        cluster.add_node(num_cpus=2)
        ray.init(address=cluster.address)
        n = cluster.add_node(num_cpus=2)

        @ray.remote
        def f():
            ray.put(1)

        a = [f.remote() for _ in range(4)]  # noqa
        wait_for_condition(lambda: len(list_objects()) == 4)

        # Make sure when there's 0 node failure, it doesn't print the error.
        with pytest.warns(None) as record:
            list_objects(_explain=True)
        assert len(record) == 0

        # Kill raylet so that list_objects will have network error on querying raylets.
        cluster.remove_node(n, allow_graceful=False)

        with pytest.warns(UserWarning):
            list_objects(raise_on_missing_output=False, _explain=True)

        # Make sure when _explain == False, warning is not printed.
        with pytest.warns(None) as record:
            list_objects(raise_on_missing_output=False, _explain=False)
        assert len(record) == 0
def test_network_partial_failures_timeout(monkeypatch, ray_start_cluster):
    """When the request fails due to network timeout,
    verifies it prints proper warning."""
    monkeypatch.setenv("RAY_record_ref_creation_sites", "1")
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)
    with monkeypatch.context() as m:
        # defer for 10s for the second node.
        m.setenv(
            "RAY_testing_asio_delay_us",
            "NodeManagerService.grpc_server.GetObjectsInfo=10000000:10000000",
        )
        cluster.add_node(num_cpus=2)

    @ray.remote
    def f():
        ray.put(1)

    a = [f.remote() for _ in range(4)]  # noqa

    def verify():
        # With a 5s timeout against a 10s-delayed node, exactly one warning
        # about the missing data source is expected.
        with pytest.warns(None) as record:
            list_objects(raise_on_missing_output=False, _explain=True, timeout=5)
        return len(record) == 1

    wait_for_condition(verify)
@pytest.mark.asyncio
async def test_cli_format_print(state_api_manager):
    """format_list_api_output supports YAML/JSON/table output and keeps
    the column ordering declared on the StateSchema class."""
    data_source_client = state_api_manager.data_source_client
    actor_id = b"1234"
    data_source_client.get_all_actor_info.return_value = GetAllActorInfoReply(
        actor_table_data=[generate_actor_data(actor_id), generate_actor_data(b"12345")]
    )
    result = await state_api_manager.list_actors(option=create_api_options())
    print(result)
    result = [ActorState(**d) for d in result.result]
    # If the format is not yaml, it will raise an exception.
    yaml.safe_load(
        format_list_api_output(result, schema=ActorState, format=AvailableFormat.YAML)
    )
    # If the format is not json, it will raise an exception.
    json.loads(
        format_list_api_output(result, schema=ActorState, format=AvailableFormat.JSON)
    )
    # Test a table formatting.
    output = format_list_api_output(
        result, schema=ActorState, format=AvailableFormat.TABLE
    )
    assert "Table:" in output
    assert "Stats:" in output
    # Unknown format strings must be rejected.
    with pytest.raises(ValueError):
        format_list_api_output(result, schema=ActorState, format="random_format")

    # Verify the default format.
    output = format_list_api_output(result, schema=ActorState)
    assert "Table:" in output
    assert "Stats:" in output

    # Verify the ordering is equal to it is defined in `StateSchema` class.
    # Index 8 contains headers
    headers = output.split("\n")[8]
    cols = ActorState.list_columns()
    headers = list(filter(lambda item: item != "", headers.strip().split(" ")))

    for i in range(len(headers)):
        header = headers[i].upper()
        col = cols[i].upper()
        assert header == col
def test_filter(shutdown_only):
    """End-to-end checks of state API filters: predicate validation,
    equality/inequality, non-string value types, CLI filters, and
    case-insensitive matching on string fields."""
    ray.init()

    # Test unsupported predicates.
    with pytest.raises(ValueError):
        list_actors(filters=[("state", ">", "DEAD")])

    @ray.remote
    class Actor:
        def __init__(self):
            self.obj = None

        def ready(self):
            pass

        def put(self):
            self.obj = ray.put(123)

        def getpid(self):
            import os

            return os.getpid()

    """
    Test basic case.
    """
    a = Actor.remote()
    b = Actor.remote()

    a_pid = ray.get(a.getpid.remote())
    b_pid = ray.get(b.getpid.remote())

    ray.get([a.ready.remote(), b.ready.remote()])
    # Kill `b` so there is exactly one DEAD and one ALIVE actor.
    ray.kill(b)

    def verify():
        result = list_actors(filters=[("state", "=", "DEAD")])
        assert len(result) == 1
        actor = result[0]
        assert actor["pid"] == b_pid

        result = list_actors(filters=[("state", "!=", "DEAD")])
        assert len(result) == 1
        actor = result[0]
        assert actor["pid"] == a_pid
        return True

    wait_for_condition(verify)

    """
    Test filter with different types (integer/bool).
    """
    obj_1 = ray.put(123)  # noqa
    ray.get(a.put.remote())
    pid = ray.get(a.getpid.remote())

    def verify():
        # There's only 1 object.
        result = list_objects(
            filters=[("pid", "=", pid), ("reference_type", "=", "LOCAL_REFERENCE")]
        )
        return len(result) == 1

    wait_for_condition(verify)

    def verify():
        # Boolean filters accept several spellings; each split must cover
        # the full worker set.
        workers = list_workers()
        live_workers = list_workers(filters=[("is_alive", "=", "true")])
        non_alive_workers = list_workers(filters=[("is_alive", "!=", "true")])
        assert len(live_workers) + len(non_alive_workers) == len(workers)

        live_workers = list_workers(filters=[("is_alive", "=", "1")])
        non_alive_workers = list_workers(filters=[("is_alive", "!=", "1")])
        assert len(live_workers) + len(non_alive_workers) == len(workers)

        live_workers = list_workers(filters=[("is_alive", "=", "True")])
        non_alive_workers = list_workers(filters=[("is_alive", "!=", "True")])
        assert len(live_workers) + len(non_alive_workers) == len(workers)

        return True

    wait_for_condition(verify)

    """
    Test CLI
    """
    dead_actor_id = list_actors(filters=[("state", "=", "DEAD")])[0]["actor_id"]
    alive_actor_id = list_actors(filters=[("state", "=", "ALIVE")])[0]["actor_id"]
    runner = CliRunner()
    result = runner.invoke(ray_list, ["actors", "--filter", "state=DEAD"])
    assert result.exit_code == 0
    assert dead_actor_id in result.output
    assert alive_actor_id not in result.output

    result = runner.invoke(ray_list, ["actors", "--filter", "state!=DEAD"])
    assert result.exit_code == 0
    assert dead_actor_id not in result.output
    assert alive_actor_id in result.output

    """
    Test case insensitive match on string fields.
    """

    @ray.remote
    def task():
        pass

    ray.get(task.remote())

    def verify():
        result_1 = list_tasks(filters=[("name", "=", "task")])
        result_2 = list_tasks(filters=[("name", "=", "TASK")])
        assert result_1 == result_2

        result_1 = list_tasks(filters=[("state", "=", "FINISHED")])
        result_2 = list_tasks(filters=[("state", "=", "finished")])
        assert result_1 == result_2

        result_1 = list_objects(
            filters=[("pid", "=", pid), ("reference_type", "=", "LOCAL_REFERENCE")]
        )
        result_2 = list_objects(
            filters=[("pid", "=", pid), ("reference_type", "=", "local_reference")]
        )
        assert result_1 == result_2

        result_1 = list_actors(filters=[("state", "=", "DEAD")])
        result_2 = list_actors(filters=[("state", "=", "dead")])
        assert result_1 == result_2

        result_1 = list_actors(filters=[("state", "!=", "DEAD")])
        result_2 = list_actors(filters=[("state", "!=", "dead")])
        assert result_1 == result_2

        return True

    wait_for_condition(verify)
def test_data_truncate(shutdown_only, monkeypatch):
    """
    Verify the data is properly truncated when there are too many entries to return.
    """
    with monkeypatch.context() as m:
        max_limit_data_source = 10
        max_limit_api_server = 1000
        m.setenv("RAY_MAX_LIMIT_FROM_API_SERVER", f"{max_limit_api_server}")
        m.setenv("RAY_MAX_LIMIT_FROM_DATA_SOURCE", f"{max_limit_data_source}")
        ray.init(num_cpus=16)

        # One more PG than the data-source limit forces truncation of 1 entry.
        pgs = [  # noqa
            ray.util.placement_group(bundles=[{"CPU": 0.001}])
            for _ in range(max_limit_data_source + 1)
        ]
        runner = CliRunner()
        with pytest.warns(UserWarning) as record:
            result = runner.invoke(ray_list, ["placement-groups"])
        assert (
            f"{max_limit_data_source} ({max_limit_data_source + 1} total "
            "from the cluster) placement_groups are retrieved from the "
            "data source. 1 entries have been truncated." in record[0].message.args[0]
        )
        assert result.exit_code == 0

        # Make sure users cannot specify higher limit than MAX_LIMIT_FROM_API_SERVER
        with pytest.raises(RayStateApiException):
            list_placement_groups(limit=max_limit_api_server + 1)
        # TODO(rickyyx): We should support error code or more granular errors from
        # the server to the client so we could assert the specific type of error.
        # assert (
        #     f"Given limit {max_limit_api_server+1} exceeds the supported "
        #     f"limit {max_limit_api_server}." in str(e)
        # )

        # Make sure warning is not printed when truncation doesn't happen.
        @ray.remote
        class A:
            def ready(self):
                pass

        a = A.remote()
        ray.get(a.ready.remote())

        with pytest.warns(None) as record:
            result = runner.invoke(ray_list, ["actors"])
        assert len(record) == 0
def test_detail(shutdown_only):
    """`ray list actors --detail` adds detail columns and defaults to YAML."""
    ray.init(num_cpus=1)

    @ray.remote
    class Actor:
        def ready(self):
            pass

    a = Actor.remote()
    ray.get(a.ready.remote())

    """
    Test CLI
    """
    runner = CliRunner()
    result = runner.invoke(ray_list, ["actors", "--detail"])
    print(result.output)
    assert result.exit_code == 0

    # The column for --detail should be in the output.
    assert "test_detail" in result.output
    # Columns are upper case in the default formatting (table).
    assert "serialized_runtime_env" in result.output
    assert "actor_id" in result.output

    # Make sure when the --detail option is specified, the default formatting
    # is yaml. If the format is not yaml, the below line will raise an yaml exception.
    # Retrieve yaml content from result output
    print(yaml.safe_load(result.output.split("---")[1].split("...")[0]))

    # When the format is given, it should respect that formatting.
    result = runner.invoke(ray_list, ["actors", "--detail", "--format=json"])
    assert result.exit_code == 0
    # Fails if output is not JSON
    print(json.loads(result.output))
def _try_state_query_expect_rate_limit(api_func, res_q, start_q=None, **kwargs):
    """Run ``api_func`` and report the outcome on ``res_q``.

    Puts ``1`` when the call is rejected by the state API rate limiter,
    ``0`` on success, and the raised exception object on any other failure.
    When ``start_q`` is given, a ``1`` is put on it just before the call as
    a start-of-work signal for the caller.
    """
    try:
        if start_q is not None:
            # Signal the caller that this worker has started.
            start_q.put(1)
        api_func(**kwargs)
    except RayStateApiException as e:
        # A rate-limited request maps to 1; any other state API error is
        # forwarded as-is for the caller to inspect.
        res_q.put(1 if "Max number of in-progress requests" in str(e) else e)
    except Exception as e:
        res_q.put(e)
    else:
        res_q.put(0)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Lambda test functions could not be pickled on Windows",
)
def test_state_api_rate_limit_with_failure(monkeypatch, shutdown_only):
    """Exceeding the server's in-progress request cap rejects new requests.

    Three queries are made against artificially-delayed endpoints to hold
    all RAY_STATE_SERVER_MAX_HTTP_REQUEST=3 slots; a fourth query must be
    rejected, and queries succeed again after the delayed ones time out.
    """
    import queue
    import threading

    # Set environment
    with monkeypatch.context() as m:
        m.setenv("RAY_STATE_SERVER_MAX_HTTP_REQUEST", "3")
        # These make list_nodes, list_workers, list_actors never return in 20secs
        m.setenv(
            "RAY_testing_asio_delay_us",
            (
                "TaskInfoGcsService.grpc_server.GetTaskEvents=20000000:20000000,"
                "WorkerInfoGcsService.grpc_server.GetAllWorkerInfo=20000000:20000000,"
                "ActorInfoGcsService.grpc_server.GetAllActorInfo=20000000:20000000"
            ),
        )

        # Set up scripts
        ray.init()

        @ray.remote
        def f():
            import time

            time.sleep(30)

        @ray.remote
        class Actor:
            pass

        task = f.remote()  # noqa
        actor = Actor.remote()  # noqa
        actor_runtime_env = Actor.options(  # noqa
            runtime_env={"pip": ["requests"]}
        ).remote()
        pg = ray.util.placement_group(bundles=[{"CPU": 1}])  # noqa
        _objs = [ray.put(x) for x in range(10)]  # noqa

        # Running 3 slow apis to exhaust the limits
        res_q = queue.Queue()
        start_q = queue.Queue()  # used for sync
        procs = [
            threading.Thread(
                target=_try_state_query_expect_rate_limit,
                args=(
                    list_workers,
                    res_q,
                    start_q,
                ),
                kwargs={"timeout": 6},
            ),
            threading.Thread(
                target=_try_state_query_expect_rate_limit,
                args=(
                    list_tasks,
                    res_q,
                    start_q,
                ),
                kwargs={"timeout": 6},
            ),
            threading.Thread(
                target=_try_state_query_expect_rate_limit,
                args=(
                    list_actors,
                    res_q,
                    start_q,
                ),
                kwargs={"timeout": 6},
            ),
        ]

        [p.start() for p in procs]

        # Wait for other processes to start so rate limit will be reached
        def _wait_to_start():
            started = 0
            for _ in range(3):
                started += start_q.get()
            return started == 3

        wait_for_condition(_wait_to_start)
        # Wait 1 more second to make sure the API call happens after all
        # process has a call.
        time.sleep(1)

        # Running another 1 should return error
        with pytest.raises(RayStateApiException) as e:
            print(list_objects())
        # TODO(rickyyx): We will use fine-grained exceptions/error code soon
        assert "Max" in str(
            e
        ), f"Expect an exception raised due to rate limit, but have {str(e)}"

        # Consecutive APIs should be successful after the previous delay ones timeout
        def verify():
            assert len(list_objects()) > 0, "non-delay APIs should be successful"
            # NOTE(review): the bare string below is a no-op expression; it
            # was probably meant to be part of the assert message above.
            "after previous ones timeout"
            return True

        wait_for_condition(verify)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Lambda test functions could not be pickled on Windows",
)
@pytest.mark.parametrize(
    "api_func",
    [
        # NOTE(rickyyx): arbitrary list of APIs, not exhaustive.
        list_objects,
        list_tasks,
        list_actors,
        list_nodes,
        list_placement_groups,
    ],
)
def test_state_api_server_enforce_concurrent_http_requests(
    api_func, monkeypatch, shutdown_only
):
    """The HTTP server admits at most RAY_STATE_SERVER_MAX_HTTP_REQUEST calls.

    ``num_procs`` threads issue the same (artificially slowed) query;
    exactly ``num_procs - max_requests`` of them must be rejected.
    """
    import queue
    import threading
    import time

    # Set environment
    with monkeypatch.context() as m:
        max_requests = 2
        m.setenv("RAY_STATE_SERVER_MAX_HTTP_REQUEST", str(max_requests))
        # All relevant calls are delayed by 200000us (0.2s) so concurrent
        # requests overlap and hit the limiter.
        m.setenv(
            "RAY_testing_asio_delay_us",
            (
                "TaskInfoGcsService.grpc_server.GetTaskEvents=200000:200000,"
                "NodeManagerService.grpc_server.GetObjectsInfo=200000:200000,"
                "ActorInfoGcsService.grpc_server.GetAllActorInfo=200000:200000,"
                "NodeInfoGcsService.grpc_server.GetAllNodeInfo=200000:200000,"
                "PlacementGroupInfoGcsService.grpc_server.GetAllPlacementGroup="
                "200000:200000"
            ),
        )
        ray.init()

        # Set up scripts
        @ray.remote
        def f():
            time.sleep(30)

        @ray.remote
        class Actor:
            pass

        task = f.remote()  # noqa
        actor = Actor.remote()  # noqa
        actor_runtime_env = Actor.options(  # noqa
            runtime_env={"pip": ["requests"]}
        ).remote()
        pg = ray.util.placement_group(bundles=[{"CPU": 1}])  # noqa
        _objs = [ray.put(x) for x in range(10)]  # noqa

        def verify():
            q = queue.Queue()
            num_procs = 3
            procs = [
                threading.Thread(
                    target=_try_state_query_expect_rate_limit,
                    args=(
                        api_func,
                        q,
                    ),
                )
                for _ in range(num_procs)
            ]

            [p.start() for p in procs]

            max_concurrent_reqs_error = 0
            for _ in range(num_procs):
                try:
                    res = q.get(timeout=10)
                    if isinstance(res, Exception):
                        assert False, f"State API error: {res}"
                    elif isinstance(res, int):
                        max_concurrent_reqs_error += res
                    else:
                        raise ValueError(res)
                except queue.Empty:
                    assert False, "Failed to get some results from a subprocess"

            # We should run into max in-progress requests errors
            assert (
                max_concurrent_reqs_error == num_procs - max_requests
            ), f"{num_procs - max_requests} requests should be rate limited"
            [p.join(5) for p in procs]
            for proc in procs:
                assert not proc.is_alive(), "All threads should exit"

            return True

        wait_for_condition(verify)
@pytest.mark.parametrize("callsite_enabled", [True, False])
def test_callsite_warning(callsite_enabled, monkeypatch, shutdown_only):
# Set environment
with monkeypatch.context() as m:
m.setenv("RAY_record_ref_creation_sites", str(int(callsite_enabled)))
ray.init()
a = ray.put(1) # noqa
runner = CliRunner()
wait_for_condition(lambda: len(list_objects()) > 0)
with pytest.warns(None) as record:
result = runner.invoke(ray_list, ["objects"])
assert result.exit_code == 0
if callsite_enabled:
assert len(record) == 0
else:
assert len(record) == 1
assert "RAY_record_ref_creation_sites=1" in str(record[0].message)
def test_raise_on_missing_output_partial_failures(monkeypatch, ray_start_cluster):
"""
Verify when there are network partial failures,
state API raises an exception when `raise_on_missing_output=True`.
"""
monkeypatch.setenv("RAY_record_ref_creation_sites", "1")
cluster = ray_start_cluster
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
with monkeypatch.context() as m:
# defer for 10s for the second node.
m.setenv(
"RAY_testing_asio_delay_us",
"NodeManagerService.grpc_server.GetObjectsInfo=10000000:10000000",
)
cluster.add_node(num_cpus=2)
@ray.remote
def f():
ray.put(1)
a = [f.remote() for _ in range(4)] # noqa
runner = CliRunner()
# Verify
def verify():
# Verify when raise_on_missing_output=True, it raises an exception.
try:
list_objects(_explain=True, timeout=3)
except RayStateApiException as e:
assert "Failed to retrieve all objects from the cluster" in str(e)
assert "due to query failures to the data sources." in str(e)
else:
assert False
try:
summarize_objects(_explain=True, timeout=3)
except RayStateApiException as e:
assert "Failed to retrieve all objects from the cluster" in str(e)
assert "due to query failures to the data sources." in str(e)
else:
assert False
# Verify when raise_on_missing_output=False, it prints warnings.
with pytest.warns(None) as record:
list_objects(raise_on_missing_output=False, _explain=True, timeout=3)
assert len(record) == 1
with pytest.warns(None) as record:
summarize_objects(raise_on_missing_output=False, _explain=True, timeout=3)
assert len(record) == 1
# Verify when CLI is used, exceptions are not raised.
with pytest.warns(None) as record:
result = runner.invoke(ray_list, ["objects", "--timeout=3"])
assert len(record) == 1
assert result.exit_code == 0
# Verify summary CLI also doesn't raise an exception.
with pytest.warns(None) as record:
result = runner.invoke(summary_state_cli_group, ["objects", "--timeout=3"])
assert result.exit_code == 0
assert len(record) == 1
return True
wait_for_condition(verify)
def test_raise_on_missing_output_truncation(monkeypatch, shutdown_only):
with monkeypatch.context() as m:
# defer for 10s for the second node.
m.setenv(
"RAY_MAX_LIMIT_FROM_DATA_SOURCE",
"10",
)
m.setenv(
"RAY_task_events_skip_driver_for_test",
"1",
)
ray.init()
@ray.remote
def task():
time.sleep(300)
tasks = [task.remote() for _ in range(15)] # noqa
runner = CliRunner()
# Verify
def verify():
# Verify when raise_on_missing_output=True, it raises an exception.
try:
list_tasks(_explain=True, timeout=3)
except RayStateApiException as e:
assert "Failed to retrieve all" in str(e)
assert "(> 10)" in str(e)
else:
assert False
try:
summarize_tasks(_explain=True, timeout=3)
except RayStateApiException as e:
assert "Failed to retrieve all" in str(e)
assert "(> 10)" in str(e)
else:
assert False
# Verify when raise_on_missing_output=False, it prints warnings.
with pytest.warns(None) as record:
list_tasks(raise_on_missing_output=False, _explain=True, timeout=3)
assert len(record) == 1
with pytest.warns(None) as record:
summarize_tasks(raise_on_missing_output=False, _explain=True, timeout=3)
assert len(record) == 1
# Verify when CLI is used, exceptions are not raised.
with pytest.warns(None) as record:
result = runner.invoke(ray_list, ["tasks", "--timeout=3"])
assert len(record) == 1
assert result.exit_code == 0
# Verify summary CLI also doesn't raise an exception.
with pytest.warns(None) as record:
result = runner.invoke(summary_state_cli_group, ["tasks", "--timeout=3"])
assert result.exit_code == 0
assert len(record) == 1
return True
wait_for_condition(verify)
def test_get_id_not_found(shutdown_only):
"""Test get API CLI fails correctly when there's no corresponding id
Related: https://github.com/ray-project/ray/issues/26808
"""
ray.init()
runner = CliRunner()
id = ActorID.from_random().hex()
result = runner.invoke(ray_get, ["actors", id])
assert result.exit_code == 0, str(result.exception) + result.output
assert f"Resource with id={id} not found in the cluster." in result.output
def test_core_state_api_usage_tags(shutdown_only):
from ray._common.usage.usage_lib import TagKey, get_extra_usage_tags_to_report
ctx = ray.init()
gcs_client = GcsClient(address=ctx.address_info["gcs_address"])
list_actors()
list_tasks()
list_jobs()
list_cluster_events()
list_nodes()
list_objects()
list_runtime_envs()
list_workers()
summarize_actors()
summarize_objects()
summarize_tasks()
result = get_extra_usage_tags_to_report(gcs_client)
expected_tags = [
TagKey.CORE_STATE_API_LIST_ACTORS,
TagKey.CORE_STATE_API_LIST_TASKS,
TagKey.CORE_STATE_API_LIST_JOBS,
TagKey.CORE_STATE_API_LIST_CLUSTER_EVENTS,
TagKey.CORE_STATE_API_LIST_NODES,
TagKey.CORE_STATE_API_LIST_OBJECTS,
TagKey.CORE_STATE_API_LIST_RUNTIME_ENVS,
TagKey.CORE_STATE_API_LIST_WORKERS,
TagKey.CORE_STATE_API_SUMMARIZE_ACTORS,
TagKey.CORE_STATE_API_SUMMARIZE_OBJECTS,
TagKey.CORE_STATE_API_SUMMARIZE_TASKS,
]
assert set(result.keys()).issuperset(
{TagKey.Name(tag).lower() for tag in expected_tags}
)
# Tests fix for https://github.com/ray-project/ray/issues/44459
def test_job_info_is_running_task(shutdown_only):
ray.init()
# To reliably know a job has a long running task, we need to wait a SignalActor
# to know the task has started.
signal = SignalActor.remote()
@ray.remote
def f(signal):
ray.get(signal.send.remote())
import time
while True:
time.sleep(10000)
long_running = f.remote(signal) # noqa: F841
ray.get(signal.wait.remote())
client = ray.worker.global_worker.gcs_client
job_id = ray.worker.global_worker.current_job_id
all_job_info = client.get_all_job_info()
assert len(all_job_info) == 1
assert job_id in all_job_info
assert all_job_info[job_id].is_running_tasks is True
def test_hang_driver_has_no_is_running_task(monkeypatch, ray_start_cluster):
"""
When there's a call to JobInfoGcsService.GetAllJobInfo, GCS sends RPC
CoreWorkerService.NumPendingTasks to all drivers for "is_running_task". Our driver
however has trouble serving such RPC, and GCS should timeout that RPC and unsest the
field.
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=10)
address = cluster.address
monkeypatch.setenv(
"RAY_testing_asio_delay_us",
"CoreWorkerService.grpc_server.NumPendingTasks=2000000:2000000",
)
ray.init(address=address)
client = ray.worker.global_worker.gcs_client
my_job_id = ray.worker.global_worker.current_job_id
all_job_info = client.get_all_job_info()
assert list(all_job_info.keys()) == [my_job_id]
assert not all_job_info[my_job_id].HasField("is_running_tasks")
def test_get_actor_timeout_multiplier(shutdown_only):
"""Test that GetApiOptions applies the same timeout multiplier as ListApiOptions.
This test reproduces the issue where get_actor with timeout=1 fails even though
the actual operation takes less than 1 second, because GetApiOptions doesn't
apply the 0.8 server timeout multiplier that ListApiOptions uses.
Related issue: https://github.com/ray-project/ray/issues/54153
"""
@ray.remote
class TestActor:
def ready(self):
pass
actor = TestActor.remote()
ray.get(actor.ready.remote())
# Test that both options classes apply the same timeout multiplier
test_timeout = 1
get_options = GetApiOptions(timeout=test_timeout)
list_options = ListApiOptions(timeout=test_timeout)
# After __post_init__, both should have the same effective timeout
assert get_options.timeout == list_options.timeout
# Test that get_actor works with a 1-second timeout
actors = list_actors()
actor_id = actors[0]["actor_id"]
# This should work without timeout issues
result = get_actor(actor_id, timeout=1)
assert result["actor_id"] == actor_id
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestListActors |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 9825,
"end": 9944
} | class ____:
def submit(self, fn: Callable[..., _T1], *args: Any, **kwargs: Any) -> list[_T1]:
return []
| Base1 |
python | walkccc__LeetCode | solutions/2481. Minimum Cuts to Divide a Circle/2481.py | {
"start": 0,
"end": 126
} | class ____:
def numberOfCuts(self, n: int) -> int:
if n == 1:
return 0
return n // 2 if n % 2 == 0 else n
| Solution |
python | python__mypy | mypy/test/meta/test_diff_helper.py | {
"start": 81,
"end": 1692
} | class ____(Suite):
def test_render_diff_range(self) -> None:
expected = ["hello", "world"]
actual = ["goodbye", "world"]
expected_ranges, actual_ranges = diff_ranges(expected, actual)
output = io.StringIO()
render_diff_range(expected_ranges, expected, output=output)
assert output.getvalue() == " hello (diff)\n world\n"
output = io.StringIO()
render_diff_range(actual_ranges, actual, output=output)
assert output.getvalue() == " goodbye (diff)\n world\n"
expected = ["a", "b", "c", "d", "e", "f", "g", "h", "circle", "i", "j"]
actual = ["a", "b", "c", "d", "e", "f", "g", "h", "square", "i", "j"]
expected_ranges, actual_ranges = diff_ranges(expected, actual)
output = io.StringIO()
render_diff_range(expected_ranges, expected, output=output, indent=0)
assert output.getvalue() == "a\nb\nc\n...\nf\ng\nh\ncircle (diff)\ni\nj\n"
output = io.StringIO()
render_diff_range(actual_ranges, actual, output=output, indent=0)
assert output.getvalue() == "a\nb\nc\n...\nf\ng\nh\nsquare (diff)\ni\nj\n"
def test_diff_ranges(self) -> None:
a = ["hello", "world"]
b = ["hello", "world"]
assert diff_ranges(a, b) == (
[(0, 0), (0, 2), (2, 2), (2, 2)],
[(0, 0), (0, 2), (2, 2), (2, 2)],
)
a = ["hello", "world"]
b = ["goodbye", "world"]
assert diff_ranges(a, b) == (
[(0, 1), (1, 2), (2, 2), (2, 2)],
[(0, 1), (1, 2), (2, 2), (2, 2)],
)
| DiffHelperSuite |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 7270,
"end": 20915
} | class ____(object):
# `_arg_types` is a dictionary which represents the types of the arguments of an Airflow `DAG`.
# `_arg_types` is used when parsing types back from the configuration json.
# It doesn't cover all the arguments but covers many of the important one which can come from the cli.
_arg_types = {
"dag_id": str,
"description": str,
"schedule_interval": str,
"start_date": datetime,
"catchup": bool,
"tags": list,
"dagrun_timeout": timedelta,
"default_args": {
"owner": str,
"depends_on_past": bool,
"email": list,
"email_on_failure": bool,
"email_on_retry": bool,
"retries": int,
"retry_delay": timedelta,
"queue": str, # which queue to target when running this job. Not all executors implement queue management, the CeleryExecutor does support targeting specific queues.
"pool": str, # the slot pool this task should run in, slot pools are a way to limit concurrency for certain tasks
"priority_weight": int,
"wait_for_downstream": bool,
"sla": timedelta,
"execution_timeout": timedelta,
"trigger_rule": str,
},
}
# Reference for user_defined_filters : https://stackoverflow.com/a/70175317
filters = dict(
task_id_creator=lambda v: task_id_creator(v),
json_dump=lambda val: json_dump(val),
run_id_creator=lambda val: run_id_creator(val),
join_list=lambda x: ",".join(list(x)),
)
def __init__(self, **kwargs):
self._args = kwargs
@property
def arguments(self):
return dict(**self._args, user_defined_filters=self.filters)
def serialize(self):
def parse_args(dd):
data_dict = {}
for k, v in dd.items():
if isinstance(v, dict):
data_dict[k] = parse_args(v)
elif isinstance(v, datetime):
data_dict[k] = v.isoformat()
elif isinstance(v, timedelta):
data_dict[k] = dict(seconds=v.total_seconds())
else:
data_dict[k] = v
return data_dict
return parse_args(self._args)
@classmethod
def deserialize(cls, data_dict):
def parse_args(dd, type_check_dict):
kwrgs = {}
for k, v in dd.items():
if k not in type_check_dict:
kwrgs[k] = v
elif isinstance(v, dict) and isinstance(type_check_dict[k], dict):
kwrgs[k] = parse_args(v, type_check_dict[k])
elif type_check_dict[k] == datetime:
kwrgs[k] = datetimeparse(v)
elif type_check_dict[k] == timedelta:
kwrgs[k] = timedelta(**v)
else:
kwrgs[k] = v
return kwrgs
return cls(**parse_args(data_dict, cls._arg_types))
def _kubernetes_pod_operator_args(operator_args):
from kubernetes import client
from airflow.kubernetes.secret import Secret
# Set dynamic env variables like run-id, task-id etc from here.
secrets = [
Secret("env", secret, secret) for secret in operator_args.get("secrets", [])
]
args = operator_args
args.update(
{
"secrets": secrets,
# Question for (savin):
# Default timeout in airflow is 120. I can remove `startup_timeout_seconds` for now. how should we expose it to the user?
}
)
# We need to explicitly add the `client.V1EnvVar` over here because
# `pod_runtime_info_envs` doesn't accept arguments in dictionary form and strictly
# Requires objects of type `client.V1EnvVar`
additional_env_vars = [
client.V1EnvVar(
name=k,
value_from=client.V1EnvVarSource(
field_ref=client.V1ObjectFieldSelector(field_path=str(v))
),
)
for k, v in {
"METAFLOW_KUBERNETES_POD_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAME": "metadata.name",
"METAFLOW_KUBERNETES_POD_ID": "metadata.uid",
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME": "spec.serviceAccountName",
"METAFLOW_KUBERNETES_NODE_IP": "status.hostIP",
}.items()
]
args["pod_runtime_info_envs"] = additional_env_vars
resources = args.get("resources")
# KubernetesPodOperator version 4.2.0 renamed `resources` to
# `container_resources` (https://github.com/apache/airflow/pull/24673) / (https://github.com/apache/airflow/commit/45f4290712f5f779e57034f81dbaab5d77d5de85)
# This was done because `KubernetesPodOperator` didn't play nice with dynamic task mapping and they had to
# deprecate the `resources` argument. Hence, the below code path checks for the version of `KubernetesPodOperator`
# and then sets the argument. If the version < 4.2.0 then we set the argument as `resources`.
# If it is > 4.2.0 then we set the argument as `container_resources`
# The `resources` argument of `KubernetesPodOperator` is going to be deprecated soon in the future.
# So we will only use it for `KubernetesPodOperator` version < 4.2.0
# The `resources` argument will also not work for `foreach`s.
provider_version = get_kubernetes_provider_version()
k8s_op_ver = create_absolute_version_number(provider_version)
if k8s_op_ver is None or k8s_op_ver < create_absolute_version_number(
KUBERNETES_PROVIDER_FOREACH_VERSION
):
# Since the provider version is less than `4.2.0` so we need to use the `resources` argument
# We need to explicitly parse `resources`/`container_resources` to `k8s.V1ResourceRequirements`,
# otherwise airflow tries to parse dictionaries to `airflow.providers.cncf.kubernetes.backcompat.pod.Resources`
# object via `airflow.providers.cncf.kubernetes.backcompat.backward_compat_converts.convert_resources` function.
# This fails many times since the dictionary structure it expects is not the same as
# `client.V1ResourceRequirements`.
args["resources"] = client.V1ResourceRequirements(
requests=resources["requests"],
limits=None if "limits" not in resources else resources["limits"],
)
else: # since the provider version is greater than `4.2.0` so should use the `container_resources` argument
args["container_resources"] = client.V1ResourceRequirements(
requests=resources["requests"],
limits=None if "limits" not in resources else resources["limits"],
)
del args["resources"]
if operator_args.get("execution_timeout"):
args["execution_timeout"] = timedelta(
**operator_args.get(
"execution_timeout",
)
)
if operator_args.get("retry_delay"):
args["retry_delay"] = timedelta(**operator_args.get("retry_delay"))
return args
def _parse_sensor_args(name, kwargs):
if name == SensorNames.EXTERNAL_TASK_SENSOR:
if "execution_delta" in kwargs:
if type(kwargs["execution_delta"]) == dict:
kwargs["execution_delta"] = timedelta(**kwargs["execution_delta"])
else:
del kwargs["execution_delta"]
return kwargs
def _get_sensor(name):
# from airflow import XComArg
# XComArg()
if name == SensorNames.EXTERNAL_TASK_SENSOR:
# ExternalTaskSensors uses an execution_date of a dag to
# determine the appropriate DAG.
# This is set to the exact date the current dag gets executed on.
# For example if "DagA" (Upstream DAG) got scheduled at
# 12 Jan 4:00 PM PDT then "DagB"(current DAG)'s task sensor will try to
# look for a "DagA" that got executed at 12 Jan 4:00 PM PDT **exactly**.
# They also support a `execution_timeout` argument to
from airflow.sensors.external_task_sensor import ExternalTaskSensor
return ExternalTaskSensor
elif name == SensorNames.S3_SENSOR:
try:
from airflow.providers.amazon.aws.sensors.s3 import S3KeySensor
except ImportError:
raise AirflowSensorNotFound(
"This DAG requires a `S3KeySensor`. "
"Install the Airflow AWS provider using : "
"`pip install apache-airflow-providers-amazon`"
)
return S3KeySensor
def get_metaflow_kubernetes_operator():
try:
from airflow.contrib.operators.kubernetes_pod_operator import (
KubernetesPodOperator,
)
except ImportError:
try:
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import (
KubernetesPodOperator,
)
except ImportError as e:
raise KubernetesProviderNotFound(
"This DAG utilizes `KubernetesPodOperator`. "
"Install the Airflow Kubernetes provider using "
"`%s -m pip install apache-airflow-providers-cncf-kubernetes`"
% sys.executable
)
class MetaflowKubernetesOperator(KubernetesPodOperator):
"""
## Why Inherit the `KubernetesPodOperator` class ?
Two key reasons :
1. So that we can override the `execute` method.
The only change we introduce to the method is to explicitly modify xcom relating to `return_values`.
We do this so that the `XComArg` object can work with `expand` function.
2. So that we can introduce a keyword argument named `mapper_arr`.
This keyword argument can help as a dummy argument for the `KubernetesPodOperator.partial().expand` method. Any Airflow Operator can be dynamically mapped to runtime artifacts using `Operator.partial(**kwargs).extend(**mapper_kwargs)` post the introduction of [Dynamic Task Mapping](https://airflow.apache.org/docs/apache-airflow/stable/concepts/dynamic-task-mapping.html).
The `expand` function takes keyword arguments taken by the operator.
## Why override the `execute` method ?
When we dynamically map vanilla Airflow operators with artifacts generated at runtime, we need to pass that information via `XComArg` to a operator's keyword argument in the `expand` [function](https://airflow.apache.org/docs/apache-airflow/stable/concepts/dynamic-task-mapping.html#mapping-over-result-of-classic-operators).
The `XComArg` object retrieves XCom values for a particular task based on a `key`, the default key being `return_values`.
Oddly dynamic task mapping [doesn't support XCom values from any other key except](https://github.com/apache/airflow/blob/8a34d25049a060a035d4db4a49cd4a0d0b07fb0b/airflow/models/mappedoperator.py#L150) `return_values`
The values of XCom passed by the `KubernetesPodOperator` are mapped to the `return_values` XCom key.
The biggest problem this creates is that the values of the Foreach cardinality are stored inside the dictionary of `return_values` and cannot be accessed trivially like : `XComArg(task)['foreach_key']` since they are resolved during runtime.
This puts us in a bind since the only xcom we can retrieve is the full dictionary and we cannot pass that as the iterable for the mapper tasks.
Hence, we inherit the `execute` method and push custom xcom keys (needed by downstream tasks such as metaflow taskids) and modify `return_values` captured from the container whenever a foreach related xcom is passed.
When we encounter a foreach xcom we resolve the cardinality which is passed to an actual list and return that as `return_values`.
This is later useful in the `Workflow.compile` where the operator's `expand` method is called and we are able to retrieve the xcom value.
"""
template_fields = KubernetesPodOperator.template_fields + (
"metaflow_pathspec",
"metaflow_run_id",
"metaflow_task_id",
"metaflow_attempt",
"metaflow_step_name",
"metaflow_flow_name",
)
def __init__(
self,
*args,
mapper_arr=None,
flow_name=None,
flow_contains_foreach=False,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.mapper_arr = mapper_arr
self._flow_name = flow_name
self._flow_contains_foreach = flow_contains_foreach
self.metaflow_pathspec = AIRFLOW_MACROS.pathspec(
self._flow_name, is_foreach=self._flow_contains_foreach
)
self.metaflow_run_id = AIRFLOW_MACROS.RUN_ID
self.metaflow_task_id = AIRFLOW_MACROS.create_task_id(
self._flow_contains_foreach
)
self.metaflow_attempt = AIRFLOW_MACROS.ATTEMPT
self.metaflow_step_name = AIRFLOW_MACROS.STEPNAME
self.metaflow_flow_name = self._flow_name
def execute(self, context):
result = super().execute(context)
if result is None:
return
ti = context["ti"]
if TASK_ID_XCOM_KEY in result:
ti.xcom_push(
key=TASK_ID_XCOM_KEY,
value=result[TASK_ID_XCOM_KEY],
)
if FOREACH_CARDINALITY_XCOM_KEY in result:
return list(range(result[FOREACH_CARDINALITY_XCOM_KEY]))
return MetaflowKubernetesOperator
| AirflowDAGArgs |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 5191,
"end": 5386
} | class ____(torch.Tensor):
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
return super().__torch_function__(func, types, args, kwargs)
| BaseTorchFunction |
python | python-excel__xlwt | xlwt/Bitmap.py | {
"start": 10525,
"end": 10923
} | class ____(ImRawDataBmpRecord):
def __init__(self, filename):
"""Insert a 24bit bitmap image in a worksheet. The main record required is
IMDATA but it must be proceeded by a OBJ record to define its position.
"""
BiffRecord.__init__(self)
self.width, self.height, self.size, data = _process_bitmap(filename)
self._write_imdata(data)
| ImDataBmpRecord |
python | encode__django-rest-framework | tests/test_atomic_requests.py | {
"start": 788,
"end": 932
} | class ____(APIView):
def post(self, request, *args, **kwargs):
BasicModel.objects.create()
raise APIException
| APIExceptionView |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-tair/llama_index/vector_stores/tair/base.py | {
"start": 945,
"end": 8869
} | class ____(BasePydanticVectorStore):
"""
Initialize TairVectorStore.
Two index types are available: FLAT & HNSW.
index args for HNSW:
- ef_construct
- M
- ef_search
Detailed info for these arguments can be found here:
https://www.alibabacloud.com/help/en/tair/latest/tairvector#section-c76-ull-5mk
Args:
index_name (str): Name of the index.
index_type (str): Type of the index. Defaults to 'HNSW'.
index_args (Dict[str, Any]): Arguments for the index. Defaults to None.
tair_url (str): URL for the Tair instance.
overwrite (bool): Whether to overwrite the index if it already exists.
Defaults to False.
kwargs (Any): Additional arguments to pass to the Tair client.
Raises:
ValueError: If tair-py is not installed
ValueError: If failed to connect to Tair instance
Examples:
`pip install llama-index-vector-stores-tair`
```python
from llama_index.core.vector_stores.tair import TairVectorStore
# Create a TairVectorStore
vector_store = TairVectorStore(
tair_url="redis://{username}:{password}@r-bp****************.redis.rds.aliyuncs.com:{port}",
index_name="my_index",
index_type="HNSW",
index_args={"M": 16, "ef_construct": 200},
overwrite=True
)
```
"""
stores_text: bool = True
stores_node: bool = True
flat_metadata: bool = False
_tair_client: Tair = PrivateAttr()
_index_name: str = PrivateAttr()
_index_type: str = PrivateAttr()
_metric_type: str = PrivateAttr()
_overwrite: bool = PrivateAttr()
_index_args: Dict[str, Any] = PrivateAttr()
_query_args: Dict[str, Any] = PrivateAttr()
_dim: int = PrivateAttr()
def __init__(
self,
tair_url: str,
index_name: str,
index_type: str = "HNSW",
index_args: Optional[Dict[str, Any]] = None,
overwrite: bool = False,
**kwargs: Any,
) -> None:
super().__init__()
try:
self._tair_client = Tair.from_url(tair_url, **kwargs)
except ValueError as e:
raise ValueError(f"Tair failed to connect: {e}")
# index identifiers
self._index_name = index_name
self._index_type = index_type
self._metric_type = "L2"
self._overwrite = overwrite
self._index_args = {}
self._query_args = {}
if index_type == "HNSW":
if index_args is not None:
ef_construct = index_args.get("ef_construct", 500)
M = index_args.get("M", 24)
ef_search = index_args.get("ef_search", 400)
else:
ef_construct = 500
M = 24
ef_search = 400
self._index_args = {"ef_construct": ef_construct, "M": M}
self._query_args = {"ef_search": ef_search}
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "TairVectorStore"
@property
def client(self) -> "Tair":
"""Return the Tair client instance."""
return self._tair_client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""
Add nodes to the index.
Args:
nodes (List[BaseNode]): List of nodes with embeddings
Returns:
List[str]: List of ids of the documents added to the index.
"""
# check to see if empty document list was passed
if len(nodes) == 0:
return []
# set vector dim for creation if index doesn't exist
self._dim = len(nodes[0].get_embedding())
if self._index_exists():
if self._overwrite:
self.delete_index()
self._create_index()
else:
logging.info(f"Adding document to existing index {self._index_name}")
else:
self._create_index()
ids = []
for node in nodes:
attributes = {
"id": node.node_id,
"doc_id": node.ref_doc_id,
"text": node.get_content(metadata_mode=MetadataMode.NONE),
}
metadata_dict = node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
attributes.update(metadata_dict)
ids.append(node.node_id)
self._tair_client.tvs_hset(
self._index_name,
f"{node.ref_doc_id}#{node.node_id}",
vector=node.get_embedding(),
is_binary=False,
**attributes,
)
_logger.info(f"Added {len(ids)} documents to index {self._index_name}")
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete a document.
Args:
doc_id (str): document id
"""
iter = self._tair_client.tvs_scan(self._index_name, "%s#*" % ref_doc_id)
for k in iter:
self._tair_client.tvs_del(self._index_name, k)
def delete_index(self) -> None:
"""Delete the index and all documents."""
_logger.info(f"Deleting index {self._index_name}")
self._tair_client.tvs_del_index(self._index_name)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query the index.
Args:
query (VectorStoreQuery): query object
Returns:
VectorStoreQueryResult: query result
Raises:
ValueError: If query.query_embedding is None.
"""
filter_expr = None
if query.filters is not None:
filter_expr = _to_filter_expr(query.filters)
if not query.query_embedding:
raise ValueError("Query embedding is required for querying.")
_logger.info(f"Querying index {self._index_name}")
query_args = self._query_args
if self._index_type == "HNSW" and "ef_search" in kwargs:
query_args["ef_search"] = kwargs["ef_search"]
results = self._tair_client.tvs_knnsearch(
self._index_name,
query.similarity_top_k,
query.query_embedding,
False,
filter_str=filter_expr,
**query_args,
)
results = [(k.decode(), float(s)) for k, s in results]
ids = []
nodes = []
scores = []
pipe = self._tair_client.pipeline(transaction=False)
for key, score in results:
scores.append(score)
pipe.tvs_hmget(self._index_name, key, "id", "doc_id", "text")
metadatas = pipe.execute()
for i, m in enumerate(metadatas):
# TODO: properly get the _node_conent
doc_id = m[0].decode()
node = TextNode(
text=m[2].decode(),
id_=doc_id,
embedding=None,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=m[1].decode())
},
)
ids.append(doc_id)
nodes.append(node)
_logger.info(f"Found {len(nodes)} results for query with id {ids}")
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores)
def _create_index(self) -> None:
_logger.info(f"Creating index {self._index_name}")
self._tair_client.tvs_create_index(
self._index_name,
self._dim,
distance_type=self._metric_type,
index_type=self._index_type,
data_type=tairvector.DataType.Float32,
**self._index_args,
)
def _index_exists(self) -> bool:
index = self._tair_client.tvs_get_index(self._index_name)
return index is not None
| TairVectorStore |
python | google__pytype | pytype/rewrite/abstract/containers_test.py | {
"start": 1084,
"end": 1787
} | class ____(BaseTest):
def test_constant_type(self):
a = self.const_var("a")
b = self.const_var("b")
c = containers.Dict(self.ctx, {a: b})
assert_type(c.constant, dict[_Var, _Var])
def test_setitem(self):
d1 = containers.Dict(self.ctx, {})
d2 = d1.setitem(self.const_var("a"), self.const_var("b"))
self.assertEqual(d2.constant, {self.const_var("a"): self.const_var("b")})
def test_update(self):
d1 = containers.Dict(self.ctx, {})
d2 = containers.Dict(self.ctx, {self.const_var("a"): self.const_var("b")})
d3 = d1.update(d2)
self.assertIsInstance(d3, containers.Dict)
self.assertEqual(d3.constant, {self.const_var("a"): self.const_var("b")})
| DictTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 304563,
"end": 304746
} | class ____(ColorScheme):
"""Diverging schema wrapper."""
_schema = {"$ref": "#/definitions/Diverging"}
def __init__(self, *args):
super().__init__(*args)
| Diverging |
python | ray-project__ray | python/ray/air/tests/execution/test_e2e_tune_flow.py | {
"start": 567,
"end": 1473
} | class ____:
"""Simple actor for testing an execution flow.
This actor can fail in three ways:
1. On init if ``actor_error_init`` is passed as a kwarg
2. On run() if ``actor_error_task`` is passed as a kwarg (RayActorError)
3. On run() if ``task_error`` is passed as a kwarg (RayTaskError)
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
if self.kwargs.get("actor_error_init"):
raise RuntimeError("INIT")
def get_kwargs(self):
return self.kwargs
def run(self, value: float) -> float:
if value == 2:
if self.kwargs.get("actor_error_task"):
# SystemExit will invoke a RayActorError
raise SystemExit
if self.kwargs.get("task_error"):
# RuntimeError will invoke a RayTaskError
raise RuntimeError("TASK")
return value
| Actor |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor_util_test.py | {
"start": 35739,
"end": 45045
} | class ____(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
def testUnknown(self):
with ops.Graph().as_default():
tf_val = gen_state_ops.variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertIn(type(c_val), [np.ndarray, np.int32])
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertIn(type(c_val), [np.ndarray, np.int32])
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertIn(type(c_val), [np.ndarray, np.int32])
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :],
array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Axis0(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops_stack.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
tf_val = array_ops_stack.stack(
[inputs[0],
array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Axis1(self):
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
inputs = [np.random.rand(4, 7) for _ in range(3)]
tf_val = array_ops_stack.stack(inputs, axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIsNone(c_val)
tf_val = array_ops_stack.stack(
[inputs[0],
array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
def testPack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
tf_val = array_ops_stack.stack(
[input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
def testPack_Partial_Axis1(self):
input_ = np.random.rand(4, 7)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
tf_val = array_ops_stack.stack(
[input_, array_ops.placeholder(dtypes.float32)], axis=1)
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertIsNone(c_val)
def testUnpack_Axis0(self):
inputs = np.random.rand(3, 4, 7)
tf_vals = array_ops_stack.unstack(inputs)
c_vals = [tensor_util.constant_value(x) for x in tf_vals]
self.assertAllClose(inputs, c_vals)
def testUnpack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
packed = array_ops_stack.stack(
[input_, array_ops.placeholder(dtypes.float32)])
tf_vals = array_ops_stack.unstack(packed)
c_vals = [tensor_util.constant_value(x, partial=True) for x in tf_vals]
self.assertAllClose(input_, c_vals[0])
self.assertIsNone(c_vals[1])
def testSplit_Axis0(self):
inputs = np.random.rand(6, 5, 7)
tf_vals = array_ops.split(inputs, 3)
c_vals = [tensor_util.constant_value(x) for x in tf_vals]
self.assertAllClose(np.split(inputs, 3), c_vals)
def testSplit_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
placeholder = array_ops.placeholder(dtypes.float32, shape=(4, 7))
# it'd be better to use concat here, but concat doesn't support partial
packed = array_ops_stack.stack([input_, placeholder])
tf_vals = array_ops.split(packed, 2)
c_vals = [tensor_util.constant_value(x, partial=True) for x in tf_vals]
self.assertAllClose(input_, c_vals[0][0])
self.assertIsNone(c_vals[1][0])
def testEqual(self):
# Scalar inputs.
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), True)
tf_val = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), False)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[True, False], [False, True]])
def testNotEqual(self):
# Scalar inputs.
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(1))
self.assertEqual(tensor_util.constant_value(tf_val), False)
tf_val = math_ops.not_equal(constant_op.constant(1),
constant_op.constant(0))
self.assertEqual(tensor_util.constant_value(tf_val), True)
# Shaped inputs with broadcast semantics.
tf_val = math_ops.not_equal(constant_op.constant([[0, 1]]),
constant_op.constant([[0], [1]]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(c_val, [[False, True], [True, False]])
def testStopGradient(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stop_gradient(input_)
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(input_, c_val)
def testIdentity(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.identity(input_)
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(input_, c_val)
def testLiteral(self):
x = "hi"
self.assertIs(x, tensor_util.constant_value(x))
def testNumpyNdarray(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
self.assertIs(np_val, tensor_util.constant_value(np_val))
def testVariable(self):
var = variables.Variable(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
def testVariableV1(self):
var = variable_v1.VariableV1(1.0, name="variable_node")
self.assertIsNone(tensor_util.constant_value(var))
| ConstantValueTest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_utils/basic_components.py | {
"start": 547,
"end": 931
} | class ____(dg.Component, dg.Resolvable):
a_string: str
an_int: int
throw: Annotated[bool, dg.Resolver(_maybe_throw)] = False
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
@classmethod
def get_additional_scope(cls) -> Mapping[str, Any]:
return {
"error": _error,
}
| MyComponent |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/models.py | {
"start": 292,
"end": 767
} | class ____(models.ForeignKey):
def __init__(self, *args, **kwargs):
self.attr_name = kwargs.pop("attr_name", None)
super().__init__(*args, **kwargs)
def get_attname(self):
return self.attr_name or super().get_attname()
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.attr_name:
kwargs["attr_name"] = self.attr_name
return name, path, args, kwargs
| CustomAttrNameForeignKey |
python | walkccc__LeetCode | solutions/1163. Last Substring in Lexicographical Order/1163.py | {
"start": 0,
"end": 889
} | class ____:
def lastSubstring(self, s: str) -> str:
i = 0
j = 1
k = 0 # the number of the same letters of s[i..n) and s[j..n)
while j + k < len(s):
if s[i + k] == s[j + k]:
k += 1
elif s[i + k] > s[j + k]:
# Skip s[j..j + k) and advance to s[j + k + 1] to find a possible
# lexicographically larger substring since s[i..i + k) == s[j..j + k)
# and s[i + k] > s[j + k).
j = j + k + 1
k = 0
else:
# Skip s[i..i + k) and advance to s[i + k + 1] or s[j] to find a
# possible lexicographically larger substring since
# s[i..i + k) == s[j..j + k) and s[i + k] < s[j + k).
# Note that it's unnecessary to explore s[i + k + 1..j) if
# i + k + 1 < j since they are already explored by j.
i = max(i + k + 1, j)
j = i + 1
k = 0
return s[i:]
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_logging_sink.py | {
"start": 5903,
"end": 8280
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a Cloud Logging export sink from a GCP project.
:param sink_name: Required. Name of the sink to delete.
:param project_id: Required. The ID of the Google Cloud project.
:param gcp_conn_id: Optional. The connection ID to use for connecting to Google Cloud.
Defaults to "google_cloud_default".
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("sink_name", "project_id", "gcp_conn_id", "impersonation_chain")
def __init__(
self,
sink_name: str,
project_id: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.sink_name = sink_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
"""Execute the operator."""
_validate_inputs(self, ["sink_name", "project_id"])
hook = CloudLoggingHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
try:
self.log.info("Deleting log sink '%s' from project '%s'", self.sink_name, self.project_id)
hook.delete_sink(sink_name=self.sink_name, project_id=self.project_id)
self.log.info("Log sink '%s' deleted successfully", self.sink_name)
except google.cloud.exceptions.NotFound as e:
self.log.error("An error occurred. Not Found.")
raise e
except google.cloud.exceptions.GoogleCloudError as e:
self.log.error("An error occurred. Exiting.")
raise e
| CloudLoggingDeleteSinkOperator |
python | psf__black | tests/data/cases/stub.py | {
"start": 207,
"end": 602
} | class ____:
attr: int
attr2: str
def f(self) -> int:
...
def g(self) -> str: ...
def g():
...
def h(): ...
if sys.version_info >= (3, 8):
class E:
def f(self): ...
class F:
def f(self): ...
class G: ...
class H: ...
else:
class I: ...
class J: ...
def f(): ...
class K:
def f(self): ...
def f(): ...
| A |
python | getsentry__sentry | src/sentry/db/postgres/schema.py | {
"start": 2885,
"end": 4781
} | class ____(DatabaseSchemaEditorMixin, PostgresDatabaseSchemaEditor):
add_field = translate_unsafeoperation_exception(PostgresDatabaseSchemaEditor.add_field)
alter_field = translate_unsafeoperation_exception(PostgresDatabaseSchemaEditor.alter_field)
alter_db_tablespace = translate_unsafeoperation_exception(
PostgresDatabaseSchemaEditor.alter_db_tablespace
)
def alter_db_table(self, model, old_db_table, new_db_table):
"""
This didn't work correctly in django_zero_downtime_migrations, so implementing here. This
method is only used to modify table name, so we just need to raise.
"""
raise UnsafeOperationException(
f"Renaming table for model {model.__name__} from {old_db_table} to {new_db_table} is unsafe.\n"
"More info here: https://develop.sentry.dev/database-migrations/#renaming-tables"
)
def delete_model(self, model, is_safe=False):
"""
It's never safe to delete a model using the standard migration process
"""
if not is_safe:
raise UnsafeOperationException(
f"Deleting the {model.__name__} model is unsafe.\n"
"More info here: https://develop.sentry.dev/database-migrations/#deleting-tables"
)
super(DatabaseSchemaEditorMixin, self).delete_model(model)
def remove_field(self, model, field, is_safe=False):
"""
It's never safe to remove a field using the standard migration process
"""
if not is_safe:
raise UnsafeOperationException(
f"Removing the {model.__name__}.{field.name} field is unsafe.\n"
"More info here: https://develop.sentry.dev/database-migrations/#deleting-columns"
)
super(DatabaseSchemaEditorMixin, self).remove_field(model, field)
| SafePostgresDatabaseSchemaEditor |
python | huggingface__transformers | src/transformers/models/vjepa2/modeling_vjepa2.py | {
"start": 37737,
"end": 42102
} | class ____(VJEPA2PreTrainedModel):
def __init__(self, config: VJEPA2Config):
super().__init__(config)
self.config = config
self.encoder = VJEPA2Encoder(config)
self.predictor = VJEPA2Predictor(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> VJEPA2PatchEmbeddings3D:
return self.encoder.embeddings.patch_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values_videos: torch.Tensor,
context_mask: Optional[list[torch.Tensor]] = None,
target_mask: Optional[list[torch.Tensor]] = None,
skip_predictor: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
**kwargs,
) -> VJEPA2WithMaskedInputModelOutput:
r"""
context_mask (`torch.Tensor` with shape `[batch_size, patch_size, 1]`, *optional*):
The mask position ids indicating which encoder output patches are going to be exposed to the predictor.
By default, this mask is created as torch.arange(N).unsqueeze(0).repeat(B,1), indicating full context
available to the predictor.
target_mask (`torch.Tensor` with shape `[batch_size, patch_size, 1]`, *optional*):
The mask position ids indicating which encoder output patches are going to be used as a prediction target
for the predictor. By default, this mask is created as torch.arange(N).unsqueeze(0).repeat(B,1), indicating
that the predictor should predict all encoder patches.
skip_predictor (bool):
flag to skip the predictor forward, useful if you just need the encoder outputs
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if pixel_values_videos is None:
raise ValueError("You have to specify pixel_values_videos")
encoder_outputs: BaseModelOutput = self.encoder(
pixel_values_videos=pixel_values_videos,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs.last_hidden_state
if context_mask is None and target_mask is None:
B = pixel_values_videos.size(0)
N = sequence_output.size(1) # ensure we are using dynamic patch size
context_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
target_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
if not skip_predictor:
predictor_outputs: BaseModelOutput = self.predictor(
encoder_hidden_states=sequence_output,
context_mask=context_mask,
target_mask=target_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
predictor_output = VJEPA2WithMaskedInputPredictorOutput(
last_hidden_state=predictor_outputs.last_hidden_state,
target_hidden_state=apply_masks(sequence_output, target_mask),
hidden_states=predictor_outputs.hidden_states,
attentions=predictor_outputs.attentions,
)
else:
predictor_output = None
encoder_output = VJEPA2WithMaskedInputModelOutput(
last_hidden_state=sequence_output,
masked_hidden_state=apply_masks(sequence_output, context_mask),
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
predictor_output=predictor_output,
)
return encoder_output
def get_vision_features(self, pixel_values_videos) -> torch.Tensor:
encoder_output = self.forward(pixel_values_videos, skip_predictor=True)
return encoder_output.last_hidden_state
@auto_docstring(
custom_intro="""
V-JEPA 2 Model transformer with a video classification head on top (a linear layer on top of the attentive pooler).
"""
)
| VJEPA2Model |
python | walkccc__LeetCode | solutions/2146. K Highest Ranked Items Within a Price Range/2146.py | {
"start": 0,
"end": 1168
} | class ____:
def highestRankedKItems(
self,
grid: list[list[int]],
pricing: list[int],
start: list[int],
k: int
) -> list[list[int]]:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
low, high = pricing
row, col = start
ans = []
if low <= grid[row][col] <= high:
ans.append([row, col])
if k == 1:
return ans
q = collections.deque([(row, col)])
seen = {(row, col)} # Mark as visited.
while q:
neighbors = []
for _ in range(len(q)):
i, j = q.popleft()
for t in range(4):
x = i + DIRS[t][0]
y = j + DIRS[t][1]
if x < 0 or x == m or y < 0 or y == n:
continue
if not grid[x][y] or (x, y) in seen:
continue
if low <= grid[x][y] <= high:
neighbors.append([x, y])
q.append((x, y))
seen.add((x, y))
neighbors.sort(key=lambda x: (grid[x[0]][x[1]], x[0], x[1]))
for neighbor in neighbors:
if len(ans) < k:
ans.append(neighbor)
if len(ans) == k:
return ans
return ans
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_erfa_ufuncs.py | {
"start": 19681,
"end": 24288
} | class ____:
@classmethod
def setup_class(cls):
# Values from apco13 and apio test cases in t_erfa_c.c
# with units changed so we can check conversion.
cls.utc1 = 2456384.5
cls.utc2 = 0.969254051
cls.dut1 = (0.1550675 * u.s).to(u.ms)
cls.sp = (-3.01974337e-11 * u.rad).to(u.deg)
cls.theta = (3.14540971 * u.rad).to(u.mdeg)
cls.elong = (-0.527800806 * u.rad).to(u.Marcsec)
cls.phi = (-1.2345856 * u.rad).to(u.arcmin)
cls.hm = (2738.0 * u.m).to(u.km)
cls.phpa = (731.0 * u.hPa).to(u.Pa)
cls.tc = (12.8 * u.deg_C).to(u.K, equivalencies=u.temperature())
cls.rh = (0.59 * u.one).to(u.percent)
cls.wl = (0.55 * u.micron).to(u.AA)
# For apio.
cls.xp = (2.47230737e-7 * u.rad).to(u.arcsec)
cls.yp = (1.82640464e-6 * u.rad).to(u.arcmin)
cls.refa = (0.000201418779 * u.rad).to(u.mas)
cls.refb = (-2.36140831e-7 * u.rad).to(u.uas)
cls.apco13_args = [
getattr(cls, name)
for name in [
"utc1",
"utc2",
"dut1",
"elong",
"phi",
"hm",
"xp",
"yp",
"phpa",
"tc",
"rh",
"wl",
]
]
cls.apio_args = [
getattr(cls, name)
for name in [
"sp",
"theta",
"elong",
"phi",
"hm",
"xp",
"yp",
"refa",
"refb",
]
]
def test_apco13(self):
astrom, eo, status = erfa_ufunc.apco13(*self.apco13_args)
assert status == 0
vvd(eo, -0.003020548354802412839, 1e-14, "eraApco13", "eo", status)
assert astrom.unit == ASTROM_UNIT
for name, expected in {
"pmt": 13.25248468622475727,
"eb": [-0.9741827107320875162,
-0.2115130190489716682,
-0.09179840189496755339],
"eh": [-0.9736425572586935247,
-0.2092452121603336166,
-0.09075578153885665295],
"em": 0.9998233240913898141,
"v": [0.2078704994520489246e-4,
-0.8955360133238868938e-4,
-0.3863338993055887398e-4],
"bm1": 0.9999999950277561004,
"bpn": np.array(
[[ 0.9999991390295147999,
0.4978650075315529277e-7,
0.001312227200850293372],
[-0.1136336652812486604e-7,
0.9999999995713154865,
-0.2928086230975367296e-4],
[-0.001312227201745553566,
0.2928082218847679162e-4,
0.9999991386008312212],
]).T,
"along": -0.5278008060295995733,
"xpl": 0.1133427418130752958e-5,
"ypl": 0.1453347595780646207e-5,
"sphi": -0.9440115679003211329,
"cphi": 0.3299123514971474711,
"diurab": 0,
"eral": 2.617608909189664000,
"refa": 0.2014187785940396921e-3,
"refb": -0.2361408314943696227e-6,
}.items(): # fmt: skip
assert_quantity_allclose(astrom[name], expected * ASTROM_UNIT[name])
def test_apco13_no_time_units(self):
msg = "cannot pass in units for 2-part time in apco13"
with pytest.raises(TypeError, match=msg):
erfa_ufunc.apco13(self.utc1 * u.day, *self.apco13_args[1:])
with pytest.raises(TypeError, match=msg):
erfa_ufunc.apco13(self.utc1, self.utc2 * u.day, *self.apco13_args[2:])
with pytest.raises(TypeError, match=msg):
erfa_ufunc.apco13(
self.utc1 * u.day, self.utc2 * u.day, *self.apco13_args[2:]
)
def test_apio(self):
astrom = erfa_ufunc.apio(*self.apio_args)
assert astrom.unit == ASTROM_UNIT
for name, expected in {
"along": -0.5278008060295995734,
"xpl": 0.1133427418130752958e-5,
"ypl": 0.1453347595780646207e-5,
"sphi": -0.9440115679003211329,
"cphi": 0.3299123514971474711,
"diurab": 0.5135843661699913529e-6,
"eral": 2.617608903970400427,
"refa": 0.2014187790000000000e-3,
"refb": -0.2361408310000000000e-6,
}.items():
assert_quantity_allclose(astrom[name], expected * ASTROM_UNIT[name])
| TestAp |
python | Pylons__pyramid | src/pyramid/authentication.py | {
"start": 5787,
"end": 12550
} | class ____(CallbackAuthenticationPolicy):
"""A :app:`Pyramid` :term:`authentication policy` which
obtains data from the :mod:`repoze.who` 1.X WSGI 'API' (the
``repoze.who.identity`` key in the WSGI environment).
Constructor Arguments
``identifier_name``
Default: ``auth_tkt``. The :mod:`repoze.who` plugin name that
performs remember/forget. Optional.
``callback``
Default: ``None``. A callback passed the :mod:`repoze.who` identity
and the :term:`request`, expected to return ``None`` if the user
represented by the identity doesn't exist or a sequence of principal
identifiers (possibly empty) representing groups if the user does
exist. If ``callback`` is None, the userid will be assumed to exist
with no group principals.
Objects of this class implement the interface described by
:class:`pyramid.interfaces.IAuthenticationPolicy`.
"""
def __init__(self, identifier_name='auth_tkt', callback=None):
self.identifier_name = identifier_name
self.callback = callback
def _get_identity(self, request):
return request.environ.get('repoze.who.identity')
def _get_identifier(self, request):
plugins = request.environ.get('repoze.who.plugins')
if plugins is None:
return None
identifier = plugins[self.identifier_name]
return identifier
def authenticated_userid(self, request):
"""Return the authenticated userid or ``None``.
If no callback is registered, this will be the same as
``unauthenticated_userid``.
If a ``callback`` is registered, this will return the userid if
and only if the callback returns a value that is not ``None``.
"""
identity = self._get_identity(request)
if identity is None:
self.debug and self._log(
'repoze.who identity is None, returning None',
'authenticated_userid',
request,
)
return None
userid = identity['repoze.who.userid']
if userid is None:
self.debug and self._log(
'repoze.who.userid is None, returning None' % userid,
'authenticated_userid',
request,
)
return None
if self._clean_principal(userid) is None:
self.debug and self._log(
(
'use of userid %r is disallowed by any built-in Pyramid '
'security policy, returning None' % userid
),
'authenticated_userid',
request,
)
return None
if self.callback is None:
return userid
if self.callback(identity, request) is not None: # is not None!
return userid
def unauthenticated_userid(self, request):
"""Return the ``repoze.who.userid`` key from the detected identity."""
identity = self._get_identity(request)
if identity is None:
return None
return identity['repoze.who.userid']
def effective_principals(self, request):
"""A list of effective principals derived from the identity.
This will return a list of principals including, at least,
:data:`pyramid.authorization.Everyone`. If there is no identity, or
the ``callback`` returns ``None``, this will be the only principal.
If the ``callback`` does not return ``None`` and an identity is
found, then the principals will include
:data:`pyramid.authorization.Authenticated`, the
``authenticated_userid`` and the list of principals returned by the
``callback``.
"""
effective_principals = [Everyone]
identity = self._get_identity(request)
if identity is None:
self.debug and self._log(
(
'repoze.who identity was None; returning %r'
% effective_principals
),
'effective_principals',
request,
)
return effective_principals
if self.callback is None:
groups = []
else:
groups = self.callback(identity, request)
if groups is None: # is None!
self.debug and self._log(
(
'security policy groups callback returned None; returning '
'%r' % effective_principals
),
'effective_principals',
request,
)
return effective_principals
userid = identity['repoze.who.userid']
if userid is None:
self.debug and self._log(
(
'repoze.who.userid was None; returning %r'
% effective_principals
),
'effective_principals',
request,
)
return effective_principals
if self._clean_principal(userid) is None:
self.debug and self._log(
(
'unauthenticated_userid returned disallowed %r; returning '
'%r as if it was None' % (userid, effective_principals)
),
'effective_principals',
request,
)
return effective_principals
effective_principals.append(Authenticated)
effective_principals.append(userid)
effective_principals.extend(groups)
return effective_principals
def remember(self, request, userid, **kw):
"""Store the ``userid`` as ``repoze.who.userid``.
The identity to authenticated to :mod:`repoze.who`
will contain the given userid as ``userid``, and
provide all keyword arguments as additional identity
keys. Useful keys could be ``max_age`` or ``userdata``.
"""
identifier = self._get_identifier(request)
if identifier is None:
return []
environ = request.environ
identity = kw
identity['repoze.who.userid'] = userid
return identifier.remember(environ, identity)
def forget(self, request):
"""Forget the current authenticated user.
Return headers that, if included in a response, will delete the
cookie responsible for tracking the current user.
"""
identifier = self._get_identifier(request)
if identifier is None:
return []
identity = self._get_identity(request)
return identifier.forget(request.environ, identity)
@implementer(IAuthenticationPolicy)
| RepozeWho1AuthenticationPolicy |
python | PyCQA__pylint | tests/functional/u/unused/unused_variable.py | {
"start": 672,
"end": 5288
} | class ____:
def test(self):
__class__ = 42 # [unused-variable]
def best(self):
self.test()
def locals_example_defined_before():
value = 42 # [possibly-unused-variable]
return locals()
def locals_example_defined_after():
local_variables = locals()
value = 42 # [unused-variable]
return local_variables
def locals_does_not_account_for_subscopes():
value = 42 # [unused-variable]
def some_other_scope():
return locals()
return some_other_scope
def unused_import_from():
from functools import wraps as abc # [unused-import]
from collections import namedtuple # [unused-import]
def unused_import_in_function(value):
from string import digits, hexdigits # [unused-import]
return value if value in digits else "Nope"
def hello(arg):
my_var = 'something' # [unused-variable]
if arg:
return True
raise Exception
# pylint: disable=wrong-import-position
PATH = OS = collections = deque = None
def function(matches):
""""yo"""
aaaa = 1 # [unused-variable]
index = -1
for match in matches:
index += 1
print(match)
from astroid import nodes
def visit_if(self, node: nodes.If) -> None:
"""increments the branches counter"""
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and len(node.orelse) > 1:
branches += 1
self.inc_branch(branches)
self.stmts += branches
def test_global():
""" Test various assignments of global
variables through imports.
"""
# pylint: disable=redefined-outer-name
global PATH, OS, collections, deque # [global-statement]
from os import path as PATH # [unused-import]
import os as OS # [unused-import]
import collections # [unused-import]
from collections import deque # [unused-import]
# make sure that these triggers unused-variable
from sys import platform # [unused-import]
from sys import version as VERSION # [unused-import]
import this # [unused-import]
import re as RE # [unused-import]
# test cases that include exceptions
def function2():
unused = 1 # [unused-variable]
try:
1 / 0
except ZeroDivisionError as error:
try:
1 / 0
except ZeroDivisionError as error: # [redefined-outer-name]
raise Exception("") from error
def func():
try:
1 / 0
except ZeroDivisionError as error:
try:
1 / 0
except error:
print("error")
def func2():
try:
1 / 0
except ZeroDivisionError as error:
try:
1 / 0
except:
raise Exception("") from error
def func3():
try:
1 / 0
except ZeroDivisionError as error:
print(f"{error}")
try:
1 / 2
except TypeError as error: # [unused-variable, redefined-outer-name]
print("warning")
def func4():
try:
1 / 0
except ZeroDivisionError as error: # [unused-variable]
try:
1 / 0
except ZeroDivisionError as error: # [redefined-outer-name]
print("error")
def main(lst):
"""https://github.com/pylint-dev/astroid/pull/1111#issuecomment-890367609"""
try:
raise ValueError
except ValueError as e: # [unused-variable]
pass
for e in lst:
pass
# e will be undefined if lst is empty
print(e) # [undefined-loop-variable]
main([])
def func5():
"""No unused-variable for a container if iterated in comprehension"""
x = []
# Test case requires homonym between "for x" and "in x"
assert [True for x in x]
def sibling_except_handlers():
try:
pass
except ValueError as e:
print(e)
try:
pass
except ValueError as e:
print(e)
def test_multiple_except_handlers_under_try():
try:
pass
except TypeError as exc:
print(exc)
except ValueError as exc:
print(exc)
except ArithmeticError as exc:
print(exc)
def func6():
a = 1
def nonlocal_writer():
nonlocal a
for a in range(10):
pass
nonlocal_writer()
assert a == 9, a
def test_regression_8595():
# pylint: disable=broad-exception-caught
import logging
def compute():
pass
try:
compute()
error = False
except Exception as e:
logging.error(e)
error = True
if error:
try:
compute()
except Exception as e: # [unused-variable]
pass
| HasUnusedDunderClass |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 44948,
"end": 45897
} | class ____(TaskRunOrchestrationRule):
"""
Ensures a task's parameters ID is copied from Scheduled to Pending and from
Pending to Running states.
If a parameters ID has been included on the proposed state, the parameters ID
on the initial state will be ignored.
"""
FROM_STATES = {StateType.SCHEDULED, StateType.PENDING}
TO_STATES = {StateType.PENDING, StateType.RUNNING}
async def before_transition(
self,
initial_state: states.State[Any] | None,
proposed_state: states.State[Any] | None,
context: OrchestrationContext[orm_models.TaskRun, core.TaskRunPolicy],
) -> None:
if initial_state is None or proposed_state is None:
return
if not proposed_state.state_details.task_parameters_id:
proposed_state.state_details.task_parameters_id = (
initial_state.state_details.task_parameters_id
)
| CopyTaskParametersID |
python | wandb__wandb | wandb/vendor/pygments/lexers/ruby.py | {
"start": 18321,
"end": 22141
} | class ____(RegexLexer):
"""
Pygments Lexer For `Fancy <http://www.fancy-lang.org/>`_.
Fancy is a self-hosted, pure object-oriented, dynamic,
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
.. versionadded:: 1.5
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
aliases = ['fancy', 'fy']
mimetypes = ['text/x-fancysrc']
tokens = {
# copied from PerlLexer:
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\s+', Text),
# balanced delimiters (copied from PerlLexer):
(r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
# Comments
(r'#(.*?)\n', Comment.Single),
# Symbols
(r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol),
# Multi-line DoubleQuotedString
(r'"""(\\\\|\\"|[^"])*"""', String),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# keywords
(r'(def|class|try|catch|finally|retry|return|return_local|match|'
r'case|->|=>)\b', Keyword),
# constants
(r'(self|super|nil|false|true)\b', Name.Constant),
(r'[(){};,/?|:\\]', Punctuation),
# names
(words((
'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String',
'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass',
'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set',
'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'),
Name.Builtin),
# functions
(r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function),
# operators, must be below functions
(r'[-+*/~,<>=&!?%^\[\].$]+', Operator),
('[A-Z]\w*', Name.Constant),
('@[a-zA-Z_]\w*', Name.Variable.Instance),
('@@[a-zA-Z_]\w*', Name.Variable.Class),
('@@?', Operator),
('[a-zA-Z_]\w*', Name),
# numbers - / checks are necessary to avoid mismarking regexes,
# see comment in RubyLexer
(r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
(r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
]
}
| FancyLexer |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_artifact_sequence_type.py | {
"start": 361,
"end": 618
} | class ____(GQLResult):
artifact_collection: Optional[ArtifactCollectionFragment] = Field(
alias="artifactCollection"
)
UpdateArtifactSequenceType.model_rebuild()
UpdateArtifactSequenceTypeResult.model_rebuild()
| UpdateArtifactSequenceTypeResult |
python | numba__numba | numba/cuda/tests/cudapy/test_complex.py | {
"start": 4588,
"end": 5251
} | class ____(BaseComplexTest):
def check_real_image(self, pyfunc):
values = self.basic_values()
self.run_unary(pyfunc,
[tp.underlying_float(tp)
for tp in (types.complex64, types.complex128)],
values)
def test_real(self):
self.check_real_image(real_usecase)
def test_imag(self):
self.check_real_image(imag_usecase)
def test_conjugate(self):
pyfunc = conjugate_usecase
values = self.basic_values()
self.run_unary(pyfunc,
[types.complex64, types.complex128],
values)
| TestComplex |
python | huggingface__transformers | tests/models/mimi/test_modeling_mimi.py | {
"start": 5421,
"end": 13418
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (MimiModel,) if is_torch_available() else ()
is_encoder_decoder = True
test_resize_embeddings = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# model does support returning hidden states
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if "output_attentions" in inputs_dict:
inputs_dict.pop("output_attentions")
if "output_hidden_states" in inputs_dict:
inputs_dict.pop("output_hidden_states")
return inputs_dict
def setUp(self):
self.model_tester = MimiModelTester(self)
self.config_tester = ConfigTester(
self, config_class=MimiConfig, hidden_size=37, common_properties=[], has_text_modality=False
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "padding_mask", "num_quantizers"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="The MimiModel does not have the usual `attention` logic")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="The MimiModel does not have the usual `attention` logic")
def test_attention_outputs(self):
pass
@unittest.skip(reason="The MimiModel does not have the usual `hidden_states` logic")
def test_hidden_states_output(self):
pass
# Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_determinism
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_determinism(first, second):
# outputs are not tensors but list (since each sequence don't have the same frame_length)
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
if isinstance(first, tuple) and isinstance(second, tuple):
for tensor1, tensor2 in zip(first, second):
check_determinism(tensor1, tensor2)
else:
check_determinism(first, second)
# Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_model_outputs_equivalence
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)
self.assertTrue(isinstance(tuple_output, tuple))
self.assertTrue(isinstance(dict_output, dict))
for tuple_value, dict_value in zip(tuple_output, dict_output.values()):
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5
),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:"
f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has"
f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}."
),
)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
# Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_identity_shortcut
def test_identity_shortcut(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_conv_shortcut = False
self.model_tester.create_and_check_model_forward(config, inputs_dict)
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
@is_flaky()
def test_flash_attn_2_inference_equivalence(self):
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
dummy_input = inputs_dict[model.main_input_name][:1]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
outputs = model(dummy_input)
outputs_fa = model_fa(dummy_input)
logits = outputs[1]
logits_fa = outputs_fa[1]
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
@unittest.skip(reason="The MimiModel does not support right padding")
def test_flash_attn_2_inference_equivalence_right_padding(self):
pass
@unittest.skip(reason="The MimiModel does not have support dynamic compile yet")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
# Copied from transformers.tests.encodec.test_modeling_encodec.normalize
def normalize(arr):
norm = np.linalg.norm(arr)
normalized_arr = arr / norm
return normalized_arr
# Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse
def compute_rmse(arr1, arr2):
arr1_normalized = normalize(arr1)
arr2_normalized = normalize(arr2)
return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean())
@slow
@require_torch
| MimiModelTest |
python | apache__airflow | providers/discord/tests/unit/discord/operators/test_discord_webhook.py | {
"start": 1028,
"end": 2161
} | class ____:
_config = {
"http_conn_id": "discord-webhook-default",
"webhook_endpoint": "webhooks/11111/some-discord-token_111",
"message": "your message here",
"username": "Airflow Webhook",
"avatar_url": "https://static-cdn.avatars.com/my-avatar-path",
"tts": False,
"proxy": "https://proxy.proxy.com:8888",
}
def setup_method(self):
args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_execute(self):
operator = DiscordWebhookOperator(task_id="discord_webhook_task", dag=self.dag, **self._config)
assert self._config["http_conn_id"] == operator.http_conn_id
assert self._config["webhook_endpoint"] == operator.webhook_endpoint
assert self._config["message"] == operator.message
assert self._config["username"] == operator.username
assert self._config["avatar_url"] == operator.avatar_url
assert self._config["tts"] == operator.tts
assert self._config["proxy"] == operator.proxy
| TestDiscordWebhookOperator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.