language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | cython__cython | Tools/cevaltrace.py | {
"start": 851,
"end": 4952
} | class ____(Exception):
def __init__(self, message="Failed to parse ceval.c"):
super(ParseError, self).__init__(message)
def parse_ceval(file_path):
snippets = {}
with open(file_path) as f:
lines = iter(f)
for line in lines:
if _is_start(line):
break
else:
raise ParseError()
targets = []
code_lines = []
for line in lines:
target_match = _match_target(line)
if target_match:
if code_lines:
code = ''.join(code_lines).rstrip()
for target in targets:
snippets[target] = code
del code_lines[:], targets[:]
targets.append(target_match.group(1))
elif _ignored(line):
pass
elif _is_end(line):
break
else:
code_lines.append(line)
else:
if not snippets:
raise ParseError()
return snippets
def translate(func, ceval_snippets):
start_offset = 0
code_obj = getattr(func, '__code__', None)
if code_obj and os.path.exists(code_obj.co_filename):
start_offset = code_obj.co_firstlineno
with open(code_obj.co_filename) as f:
code_line_at = {
i: line.strip()
for i, line in enumerate(f, 1)
if line.strip()
}.get
else:
code_line_at = lambda _: None
for instr in get_instructions(func):
code_line = code_line_at(instr.starts_line)
line_no = (instr.starts_line or start_offset) - start_offset
yield line_no, code_line, instr, ceval_snippets.get(instr.opname)
def main():
import sys
import importlib.util
if len(sys.argv) < 3:
print("Usage: %s path/to/Python/ceval.c script.py ..." % sys.argv[0], file=sys.stderr)
return
ceval_source_file = sys.argv[1]
version_header = os.path.join(os.path.dirname(ceval_source_file), '..', 'Include', 'patchlevel.h')
if os.path.exists(version_header):
with open(version_header) as f:
py_version = _find_pyversion(f.read())
if py_version:
py_version = py_version[0]
if not sys.version.startswith(py_version + ' '):
print("Warning: disassembling with Python %s, but ceval.c has version %s" % (
sys.version.split(None, 1)[0],
py_version,
), file=sys.stderr)
snippets = parse_ceval(ceval_source_file)
for code in _COLLAPSE:
if code in snippets:
snippets[code] = ''
for file_path in sys.argv[2:]:
module_name = os.path.basename(file_path)
print("/*######## MODULE %s ########*/" % module_name)
print('')
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for func_name, item in sorted(vars(module).items()):
if not callable(item):
continue
print("/* FUNCTION %s */" % func_name)
print("static void") # assuming that it highlights in editors
print("%s() {" % func_name)
last_line = None
for line_no, code_line, instr, snippet in translate(item, snippets):
if last_line != line_no:
if code_line:
print('')
print('/*# %3d %s */' % (line_no, code_line))
print('')
last_line = line_no
print(" %s:%s {%s" % (
instr.opname,
' /* %s */' % instr.argrepr if instr.arg is not None else '',
' /* ??? */' if snippet is None else ' /* ... */ }' if snippet == '' else '',
))
print(snippet or '')
print("} /* FUNCTION %s */" % func_name)
if __name__ == '__main__':
main()
| ParseError |
python | ray-project__ray | release/nightly_tests/multimodal_inference_benchmarks/image_classification/ray_data_main.py | {
"start": 1535,
"end": 3762
} | class ____:
def __init__(self):
self.weights = weights
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = resnet18(weights=self.weights).to(self.device)
self.model.eval()
def __call__(self, batch):
torch_batch = torch.from_numpy(batch["norm_image"]).to(self.device)
# NOTE: Remove the `norm_image` column since we don't need it anymore. This is
# done by the system automatically on Ray Data 2.51+ with the `with_column`
# API.
del batch["norm_image"]
with torch.inference_mode():
prediction = self.model(torch_batch)
predicted_classes = prediction.argmax(dim=1).detach().cpu()
predicted_labels = [
self.weights.meta["categories"][i] for i in predicted_classes
]
batch["label"] = predicted_labels
return batch
start_time = time.time()
# You can use `download` on Ray 2.50+.
if version.parse(ray.__version__) > version.parse("2.49.2"):
ds = (
ray.data.read_parquet(INPUT_PATH)
# NOTE: Limit to the 803,580 images Daft uses in their benchmark.
.limit(803_580)
.with_column("bytes", download("image_url"))
.map(fn=deserialize_image)
.map(fn=transform_image)
.map_batches(
fn=ResNetActor,
batch_size=BATCH_SIZE,
num_gpus=1.0,
concurrency=NUM_GPU_NODES,
)
.select_columns(["image_url", "label"])
)
ds.write_parquet(OUTPUT_PATH)
else:
# NOTE: Limit to the 803,580 images Daft uses in their benchmark.
paths = ray.data.read_parquet(INPUT_PATH).limit(803_580).take_all()
paths = [row["image_url"] for row in paths]
ds = (
ray.data.read_images(
paths, include_paths=True, ignore_missing_paths=True, mode="RGB"
)
.map(fn=transform_image)
.map_batches(
fn=ResNetActor,
batch_size=BATCH_SIZE,
num_gpus=1.0,
concurrency=NUM_GPU_NODES,
)
.select_columns(["path", "label"])
)
ds.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| ResNetActor |
python | apache__avro | lang/py/avro/protocol.py | {
"start": 1510,
"end": 5889
} | class ____:
"""
Avro protocols describe RPC interfaces. Like schemas, they are defined with JSON text.
A protocol is a JSON object with the following attributes:
- protocol, a string, the name of the protocol (required);
- namespace, an optional string that qualifies the name;
- doc, an optional string describing this protocol;
- types, an optional list of definitions of named types (records, enums, fixed and errors). An error definition is just like a record definition except it uses "error" instead of "record". Note that forward references to named types are not permitted.
- messages, an optional JSON object whose keys are message names and whose values are objects whose attributes are described below. No two messages may have the same name.
The name and namespace qualification rules defined for schema objects apply to protocols as well.
"""
__slots__ = [
"_md5",
"_messages",
"_name",
"_namespace",
"_types",
"_validate_names",
]
_md5: bytes
_messages: Optional[Mapping[str, "Message"]]
_name: str
_namespace: Optional[str]
_types: Optional[Sequence[avro.schema.NamedSchema]]
_validate_names: bool
def __init__(
self,
name: str,
namespace: Optional[str] = None,
types: Optional[Sequence[str]] = None,
messages: Optional[Mapping[str, "MessageObject"]] = None,
validate_names: bool = True,
) -> None:
if not name:
raise avro.errors.ProtocolParseException("Protocols must have a non-empty name.")
if not isinstance(name, str):
raise avro.errors.ProtocolParseException("The name property must be a string.")
if not (namespace is None or isinstance(namespace, str)):
raise avro.errors.ProtocolParseException("The namespace property must be a string.")
if not (types is None or isinstance(types, list)):
raise avro.errors.ProtocolParseException("The types property must be a list.")
if not (messages is None or callable(getattr(messages, "get", None))):
raise avro.errors.ProtocolParseException("The messages property must be a JSON object.")
self._validate_names = validate_names
type_names = avro.name.Names(validate_names=self._validate_names)
self._name = name
self._namespace = type_names.default_namespace = namespace
self._types = _parse_types(types, type_names, self._validate_names) if types else None
self._messages = _parse_messages(messages, type_names, self._validate_names) if messages else None
self._md5 = hashlib.md5(str(self).encode()).digest()
@property
def name(self) -> str:
return self._name
@property
def namespace(self) -> Optional[str]:
return self._namespace
@property
def fullname(self) -> Optional[str]:
return avro.name.Name(self.name, self.namespace, None, validate_name=self._validate_names).fullname
@property
def types(self) -> Optional[Sequence[avro.schema.NamedSchema]]:
return self._types
@property
def types_dict(self) -> Optional[Mapping[str, avro.schema.NamedSchema]]:
return None if self.types is None else {type_.name: type_ for type_ in self.types}
@property
def messages(self) -> Optional[Mapping[str, "Message"]]:
return self._messages
@property
def md5(self) -> bytes:
return self._md5
def to_json(self) -> Mapping[str, Union[str, Sequence[object], Mapping[str, "MessageObject"]]]:
names = avro.name.Names(default_namespace=self.namespace, validate_names=self._validate_names)
return {
"protocol": self.name,
**({"namespace": self.namespace} if self.namespace else {}),
**({"types": [t.to_json(names) for t in self.types]} if self.types else {}),
**({"messages": {name: body.to_json(names) for name, body in self.messages.items()}} if self.messages else {}),
}
def __str__(self) -> str:
return json.dumps(self.to_json())
def __eq__(self, that: object) -> bool:
this_ = json.loads(str(self))
try:
that_ = json.loads(str(that))
except json.decoder.JSONDecodeError:
return False
return cast(bool, this_ == that_)
| Protocol |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/output_manager.py | {
"start": 454,
"end": 1918
} | class ____(ResourceDefinition, IOutputManagerDefinition):
"""Definition of an output manager resource.
An OutputManagerDefinition is a :py:class:`ResourceDefinition` whose resource_fn returns an
:py:class:`OutputManager`. OutputManagers are used to handle the outputs of solids.
"""
def __init__(
self,
resource_fn=None,
config_schema=None,
description=None,
output_config_schema=None,
required_resource_keys=None,
version=None,
):
self._output_config_schema = convert_user_facing_definition_config_schema(
output_config_schema
)
super().__init__(
resource_fn=resource_fn, # pyright: ignore[reportArgumentType]
config_schema=config_schema,
description=description,
required_resource_keys=required_resource_keys,
version=version,
)
@property
def output_config_schema(self): # pyright: ignore[reportIncompatibleMethodOverride]
return self._output_config_schema
def copy_for_configured(self, description, config_schema):
return OutputManagerDefinition(
config_schema=config_schema,
description=description or self.description,
resource_fn=self.resource_fn,
required_resource_keys=self.required_resource_keys,
output_config_schema=self.output_config_schema,
)
| OutputManagerDefinition |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/pipes_tests/in_process_client.py | {
"start": 1613,
"end": 1874
} | class ____(PipesParamsLoader):
def load_context_params(self) -> PipesParams:
return {}
def load_messages_params(self) -> PipesParams:
return {}
def is_dagster_pipes_process(self) -> bool:
return True
| InProcessPipesParamLoader |
python | wandb__wandb | wandb/vendor/pygments/formatters/other.py | {
"start": 505,
"end": 927
} | class ____(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
| NullFormatter |
python | gevent__gevent | src/gevent/_config.py | {
"start": 589,
"end": 3869
} | class ____(type):
# pylint:disable=bad-mcs-classmethod-argument
def __new__(cls, name, bases, cls_dict):
if name == 'Setting':
return type.__new__(cls, name, bases, cls_dict)
cls_dict["order"] = len(ALL_SETTINGS)
if 'name' not in cls_dict:
cls_dict['name'] = name.lower()
if 'environment_key' not in cls_dict:
cls_dict['environment_key'] = 'GEVENT_' + cls_dict['name'].upper()
new_class = type.__new__(cls, name, bases, cls_dict)
new_class.fmt_desc(cls_dict.get("desc", ""))
new_class.__doc__ = new_class.desc
ALL_SETTINGS.append(new_class)
if new_class.document:
setting_name = cls_dict['name']
def getter(self):
return self.settings[setting_name].get()
def setter(self, value): # pragma: no cover
# The setter should never be hit, Config has a
# __setattr__ that would override. But for the sake
# of consistency we provide one.
self.settings[setting_name].set(value)
prop = property(getter, setter, doc=new_class.__doc__)
setattr(Config, cls_dict['name'], prop)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
if hasattr(cls, 'shortname_map'):
desc += (
"\n\nThis is an importable value. It can be "
"given as a string naming an importable object, "
"or a list of strings in preference order and the first "
"successfully importable object will be used. (Separate values "
"in the environment variable with commas.) "
"It can also be given as the callable object itself (in code). "
)
if cls.shortname_map:
desc += "Shorthand names for default objects are %r" % (list(cls.shortname_map),)
if getattr(cls.validate, '__doc__'):
desc += '\n\n' + textwrap.dedent(cls.validate.__doc__).strip()
if isinstance(cls.default, str) and hasattr(cls, 'shortname_map'):
default = "`%s`" % (cls.default,)
else:
default = "`%r`" % (cls.default,)
desc += "\n\nThe default value is %s" % (default,)
desc += ("\n\nThe environment variable ``%s`` "
"can be used to control this." % (cls.environment_key,))
setattr(cls, "desc", desc)
return desc
def validate_invalid(value):
raise ValueError("Not a valid value: %r" % (value,))
def validate_bool(value):
"""
This is a boolean value.
In the environment variable, it may be given as ``1``, ``true``,
``on`` or ``yes`` for `True`, or ``0``, ``false``, ``off``, or
``no`` for `False`.
"""
if isinstance(value, string_types):
value = value.lower().strip()
if value in ('1', 'true', 'on', 'yes'):
value = True
elif value in ('0', 'false', 'off', 'no') or not value:
value = False
else:
raise ValueError("Invalid boolean string: %r" % (value,))
return bool(value)
def validate_anything(value):
return value
convert_str_value_as_is = validate_anything
| SettingType |
python | kamyu104__LeetCode-Solutions | Python/shortest-distance-to-target-color.py | {
"start": 29,
"end": 1028
} | class ____(object):
def shortestDistanceColor(self, colors, queries):
"""
:type colors: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
dp = [[-1 for _ in xrange(len(colors))] for _ in xrange(3)]
dp[colors[0]-1][0] = 0
for i in xrange(1, len(colors)):
for color in xrange(3):
dp[color][i] = dp[color][i-1]
dp[colors[i]-1][i] = i
dp[colors[len(colors)-1]-1][len(colors)-1] = len(colors)-1
for i in reversed(xrange(len(colors)-1)):
for color in xrange(3):
if dp[color][i+1] == -1:
continue
if dp[color][i] == -1 or \
abs(dp[color][i+1]-i) < abs(dp[color][i]-i):
dp[color][i] = dp[color][i+1]
dp[colors[i]-1][i] = i
return [abs(dp[color-1][i]-i) if dp[color-1][i] != -1 else -1 \
for i, color in queries]
| Solution |
python | allegroai__clearml | examples/hyperdatasets/finetune_qa_lora.py | {
"start": 6938,
"end": 19607
} | class ____(IterableDataset):
"""Wrap an iterable dataset and stop after yielding ``max_items`` samples."""
def __init__(self, dataset: IterableDataset, max_items: Optional[int] = None) -> None:
self._dataset = dataset
self._max_items = max_items
def __iter__(self) -> Iterable[Dict[str, torch.Tensor]]:
if self._max_items is None:
yield from self._dataset
return
for idx, sample in enumerate(self._dataset):
if idx >= self._max_items:
break
yield sample
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Fine-tune a causal LM using HyperDataset Q&A entries with LoRA (Trainer variant)"
)
name_group = parser.add_argument_group("Name-based selection")
name_group.add_argument("--project", default=None, help="ClearML project containing the HyperDataset")
name_group.add_argument("--dataset-name", default=None, help="HyperDataset collection name")
name_group.add_argument("--version-name", default=None, help="HyperDataset version name")
id_group = parser.add_argument_group("ID-based selection")
id_group.add_argument("--dataset-id", default=None, help="HyperDataset collection id")
id_group.add_argument("--version-id", default=None, help="HyperDataset version id")
parser.add_argument("--model", default="Qwen/Qwen2.5-0.5B-Instruct", help="Base causal LM to fine-tune")
parser.add_argument("--batch-size", type=int, default=2, help="Per-device training batch size")
parser.add_argument("--max-length", type=int, default=1024, help="Maximum sequence length")
parser.add_argument("--learning-rate", type=float, default=5e-5, help="Optimizer learning rate")
parser.add_argument("--weight-decay", type=float, default=0.0, help="Weight decay value")
parser.add_argument("--max-steps", type=int, default=100, help="Total optimization steps (set -1 to disable)")
parser.add_argument("--warmup-steps", type=int, default=10, help="Scheduler warmup steps")
parser.add_argument("--gradient-accumulation", type=int, default=1, help="Gradient accumulation steps")
parser.add_argument("--num-workers", type=int, default=0, help="DataLoader worker processes")
parser.add_argument("--max-epochs", type=int, default=3, help="Maximum passes over the dataset")
parser.add_argument(
"--prompt-template", default="### Question:\n{question}\n\n### Answer:\n{answer}\n", help="Prompt template"
)
parser.add_argument("--output-dir", default=None, help="Where to store the LoRA adapters after training")
parser.add_argument("--device", default=None, help="Torch device override (must be 'cuda' or omitted)")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument("--log-interval", type=int, default=10, help="Trainer logging interval (in optimizer steps)")
parser.add_argument("--lora-r", type=int, default=8, help="LoRA rank")
parser.add_argument("--lora-alpha", type=float, default=32.0, help="LoRA alpha scaling")
parser.add_argument("--lora-dropout", type=float, default=0.05, help="LoRA dropout probability")
parser.add_argument(
"--lora-target-modules",
nargs="+",
default=["q_proj", "k_proj", "v_proj", "o_proj"],
help="LoRA target modules within the attention blocks",
)
parser.add_argument(
"--save-model",
action="store_true",
help="Merge LoRA adapters into the base model and upload it as a ClearML OutputModel",
)
# ClearML / distributed
parser.add_argument("--queue", type=str, default=None, help="ClearML queue to enqueue to (runs remotely)")
parser.add_argument("--num-nodes", type=int, default=1, help="Total number of nodes")
parser.add_argument("--devices-per-node", type=int, default=-1, help="-1: all visible GPUs; else number")
parser.add_argument("--dist-port", type=int, default=29500, help="Rendezvous port (local multi-GPU)")
# Eval dataset (optional)
parser.add_argument("--skip-eval", action="store_true", help="Skip evaluation after training")
parser.add_argument("--eval-batch-limit", type=int, default=None, help="Evaluate on at most this many batches")
parser.add_argument(
"--eval-project", default=None, help="Project for the evaluation dataset (defaults to training project)"
)
parser.add_argument("--eval-dataset-name", default=None, help="Name of the evaluation dataset")
parser.add_argument("--eval-version-name", default=None, help="Version name of the evaluation dataset")
parser.add_argument("--eval-dataset-id", default=None, help="ID of the evaluation dataset")
parser.add_argument("--eval-version-id", default=None, help="Version ID of the evaluation dataset")
return parser.parse_args()
def _build_datasets(
args: argparse.Namespace,
tokenizer,
query_kwargs: Dict[str, str],
eval_query_kwargs: Optional[Dict[str, str]],
) -> tuple[IterableDataset, Optional[IterableDataset]]:
train_dataset = QALoraIterableDataset(
query_kwargs=query_kwargs,
tokenizer=tokenizer,
max_length=args.max_length,
prompt_template=args.prompt_template,
dist_world_size=1,
dist_rank=0,
)
if args.skip_eval or eval_query_kwargs is None:
return train_dataset, None
eval_dataset: IterableDataset = QALoraIterableDataset(
query_kwargs=eval_query_kwargs,
tokenizer=tokenizer,
max_length=args.max_length,
prompt_template=args.prompt_template,
dist_world_size=1,
dist_rank=0,
)
if args.eval_batch_limit:
max_items = args.eval_batch_limit * args.batch_size
eval_dataset = _LimitedIterableDataset(eval_dataset, max_items=max_items)
return train_dataset, eval_dataset
def _trainer_process_entry(
local_rank: int,
args: argparse.Namespace,
task_id: str,
node_rank: int,
gpus_per_node: int,
world_size: int,
) -> None:
global_rank = int(os.environ.get("RANK", "0"))
if world_size > 1:
os.environ["LOCAL_RANK"] = str(local_rank)
if args.device and args.device.lower() not in {None, "cuda"}:
raise ValueError("Only CUDA devices are supported in this script. Omit --device or set it to 'cuda'.")
if not torch.cuda.is_available():
raise RuntimeError("CUDA must be available to run this script; NCCL backend requires GPUs.")
torch.cuda.set_device(local_rank)
if world_size > 1 and dist.is_available() and not dist.is_initialized():
dist.init_process_group(
backend="nccl",
init_method="env://",
world_size=world_size,
rank=global_rank,
)
task = Task.current_task() or Task.init(project_name="Finetune", task_name="Finetune", task_id=task_id)
task.get_logger()
set_seed(args.seed + global_rank)
dataset = resolve_dataset(args)
eval_dataset = dataset
has_eval_override = any(
getattr(args, name) is not None
for name in ("eval_dataset_name", "eval_dataset_id", "eval_version_name", "eval_version_id", "eval_project")
)
if has_eval_override:
eval_dataset = resolve_dataset(args, prefix="eval_")
model, tokenizer = prepare_model(args)
query_kwargs = {
"project_id": dataset.project_id or "*",
"dataset_id": dataset.dataset_id,
"version_id": dataset.version_id,
}
eval_query_kwargs = None
if not args.skip_eval:
eval_query_kwargs = {
"project_id": eval_dataset.project_id or query_kwargs["project_id"],
"dataset_id": eval_dataset.dataset_id,
"version_id": eval_dataset.version_id,
}
train_dataset, eval_dataset_obj = _build_datasets(args, tokenizer, query_kwargs, eval_query_kwargs)
default_output_dir = args.output_dir or os.path.join("outputs", task_id)
if global_rank == 0:
os.makedirs(default_output_dir, exist_ok=True)
training_args = TrainingArguments(
output_dir=default_output_dir,
per_device_train_batch_size=args.batch_size,
per_device_eval_batch_size=args.batch_size,
gradient_accumulation_steps=args.gradient_accumulation,
num_train_epochs=args.max_epochs,
max_steps=args.max_steps,
warmup_steps=args.warmup_steps,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
logging_steps=max(1, args.log_interval),
logging_first_step=True,
save_strategy="no",
dataloader_num_workers=args.num_workers,
dataloader_pin_memory=True,
report_to=["clearml"],
lr_scheduler_type="linear",
fp16=torch.cuda.is_available(),
remove_unused_columns=False,
seed=args.seed,
gradient_checkpointing=False,
max_grad_norm=1.0,
disable_tqdm=not (global_rank == 0),
ddp_find_unused_parameters=False,
ddp_backend="nccl" if world_size > 1 else None,
)
trainer = Trainer(
model=model,
args=training_args,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset_obj,
data_collator=default_data_collator,
)
train_result = trainer.train()
trainer.log_metrics("train", train_result.metrics)
trainer.save_state()
if not args.skip_eval and eval_dataset_obj is not None:
metrics = trainer.evaluate(eval_dataset=eval_dataset_obj)
trainer.log_metrics("eval", metrics)
if trainer.is_world_process_zero():
trainer.save_model(default_output_dir)
tokenizer.save_pretrained(default_output_dir)
if getattr(args, "save_model", False):
merged_dir = os.path.join(default_output_dir, "merged_model")
os.makedirs(merged_dir, exist_ok=True)
try:
merged_model = trainer.model.merge_and_unload()
merged_model.save_pretrained(merged_dir)
tokenizer.save_pretrained(merged_dir)
model_name = f"{os.path.basename(args.model)}-finetuned"
output_model = OutputModel(task=task, name=model_name, framework="PyTorch")
output_model.update_weights_package(weights_path=merged_dir, async_enable=False)
task.get_logger().report_text(
f"Merged model uploaded to ClearML model repository as '{output_model.name or model_name}'."
)
except Exception as exc: # pragma: no cover
task.get_logger().report_text(f"Failed to merge and upload merged model: {exc}")
raise
if dist.is_available() and dist.is_initialized():
dist.barrier()
dist.destroy_process_group()
def main() -> None:
args = parse_args()
if args.num_nodes < 1:
raise ValueError("--num-nodes must be at least 1")
if args.devices_per_node == 0 or args.devices_per_node < -1:
raise ValueError("--devices-per-node must be -1 (all GPUs) or a positive integer")
Task.set_resource_monitor_iteration_timeout(
seconds_from_start=1,
wait_for_first_iteration_to_start_sec=1,
max_wait_for_first_iteration_to_start_sec=1,
)
task = Task.init(project_name="Finetune", task_name="Finetune", output_uri=True)
if args.queue:
task.set_packages("./requirements.txt")
task.set_base_docker(
docker_image="pytorch/pytorch:2.8.0-cuda12.8-cudnn9-runtime",
)
task.execute_remotely(queue_name=args.queue)
node_rank = 0
if args.num_nodes > 1:
task.launch_multi_node(args.num_nodes, port=args.dist_port, devices=args.devices_per_node, wait=True)
node_rank = int(os.environ.get("NODE_RANK", "0"))
visible_gpus = torch.cuda.device_count()
if visible_gpus < 1:
raise RuntimeError("No CUDA devices visible inside the process/container.")
gpus_per_node = visible_gpus if args.devices_per_node in (-1, None) else min(args.devices_per_node, visible_gpus)
if gpus_per_node < 1:
raise RuntimeError("devices_per_node resolved to <1 GPU. Set --devices-per-node or expose GPUs.")
world_size = gpus_per_node * max(1, args.num_nodes)
if world_size > 1:
mp.spawn(
_trainer_process_entry,
nprocs=gpus_per_node,
args=(args, task.id, node_rank, gpus_per_node, world_size),
join=True,
)
else:
_trainer_process_entry(0, args, task.id, node_rank, gpus_per_node, world_size)
if __name__ == "__main__":
main()
| _LimitedIterableDataset |
python | huggingface__transformers | src/transformers/models/deepseek_v3/modeling_deepseek_v3.py | {
"start": 22305,
"end": 24235
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: DeepseekV3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = DeepseekV3Attention(config=config, layer_idx=layer_idx)
if layer_idx >= config.first_k_dense_replace:
self.mlp = DeepseekV3MoE(config)
else:
self.mlp = DeepseekV3MLP(config)
self.input_layernorm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| DeepseekV3DecoderLayer |
python | gevent__gevent | src/gevent/pywsgi.py | {
"start": 18459,
"end": 52991
} | class ____(object):
"""
Handles HTTP requests from a socket, creates the WSGI environment, and
interacts with the WSGI application.
This is the default value of :attr:`WSGIServer.handler_class`.
This class may be subclassed carefully, and that class set on a
:class:`WSGIServer` instance through a keyword argument at
construction time.
Instances are constructed with the same arguments as passed to the
server's :meth:`WSGIServer.handle` method followed by the server
itself. The application and environment are obtained from the server.
"""
# pylint:disable=too-many-instance-attributes
protocol_version = 'HTTP/1.1'
def MessageClass(self, *args):
return headers_factory(*args)
# Attributes reset at various times for each request; not public
# documented. Class attributes to keep the constructor fast
# (but not make lint tools complain)
status = None # byte string: b'200 OK'
_orig_status = None # native string: '200 OK'
response_headers = None # list of tuples (b'name', b'value')
code = None # Integer parsed from status
provided_date = None
provided_content_length = None
close_connection = False
time_start = 0 # time.time() when begin handling request
time_finish = 0 # time.time() when done handling request
headers_sent = False # Have we already sent headers?
response_use_chunked = False # Write with transfer-encoding chunked
# Was the connection upgraded? We shouldn't try to chunk writes in that
# case.
connection_upgraded = False
environ = None # Dict from self.get_environ
application = None # application callable from self.server.application
requestline = None # native str 'GET / HTTP/1.1'
response_length = 0 # How much data we sent
result = None # The return value of the WSGI application
wsgi_input = None # Instance of Input()
content_length = 0 # From application-provided headers Incoming
# request headers, instance of MessageClass (gunicorn uses hasattr
# on this so the default value needs to be compatible with the
# API)
headers = headers_factory(BytesIO())
request_version = None # str: 'HTTP 1.1'
command = None # str: 'GET'
path = None # str: '/'
def __init__(self, sock, address, server, rfile=None):
# Deprecation: The rfile kwarg was introduced in 1.0a1 as part
# of a refactoring. It was never documented or used. It is
# considered DEPRECATED and may be removed in the future. Its
# use is not supported.
self.socket = sock
self.client_address = address
self.server = server
if rfile is None:
self.rfile = sock.makefile('rb', -1)
else:
self.rfile = rfile
def handle(self):
"""
The main request handling method, called by the server.
This method runs a request handling loop, calling
:meth:`handle_one_request` until all requests on the
connection have been handled (that is, it implements
keep-alive).
"""
try:
while self.socket is not None:
self.time_start = time.time()
self.time_finish = 0
result = self.handle_one_request()
if result is None:
break
if result is True:
continue
self.status, response_body = result # pylint:disable=unpacking-non-sequence
self.socket.sendall(response_body)
if self.time_finish == 0:
self.time_finish = time.time()
self.log_request()
break
finally:
if self.socket is not None:
_sock = getattr(self.socket, '_sock', None) # Python 3
try:
# read out request data to prevent error: [Errno 104] Connection reset by peer
if _sock:
try:
# socket.recv would hang
_sock.recv(16384)
finally:
_sock.close()
self.socket.close()
except socket.error:
pass
self.__dict__.pop('socket', None)
self.__dict__.pop('rfile', None)
self.__dict__.pop('wsgi_input', None)
def _check_http_version(self):
version_str = self.request_version
if not version_str.startswith("HTTP/"):
return False
version = tuple(int(x) for x in version_str[5:].split(".")) # "HTTP/"
if version[1] < 0 or version < (0, 9) or version >= (2, 0):
return False
return True
def read_request(self, raw_requestline):
    """
    Parse the incoming request.

    Parses various headers into ``self.headers`` using
    :attr:`MessageClass`. Other attributes that are set upon a successful
    return of this method include ``self.content_length`` and ``self.close_connection``.

    :param str raw_requestline: A native :class:`str` representing
       the request line. A processed version of this will be stored
       into ``self.requestline``.

    :raises ValueError: If the request is invalid. This error will
       not be logged as a traceback (because it's a client issue, not a server problem).
    :return: A boolean value indicating whether the request was successfully parsed.
       This method should either return a true value or have raised a ValueError
       with details about the parsing error.

    .. versionchanged:: 1.1b6
       Raise the previously documented :exc:`ValueError` in more cases instead of returning a
       false value; this allows subclasses more opportunity to customize behaviour.
    """
    # pylint:disable=too-many-branches
    self.requestline = raw_requestline.rstrip()
    words = self.requestline.split()
    if len(words) == 3:
        # Normal request line: METHOD PATH HTTP/x.y
        self.command, self.path, self.request_version = words
        if not self._check_http_version():
            raise _InvalidClientRequest('Invalid http version: %r' % (raw_requestline,))
    elif len(words) == 2:
        # Two-token request line: only the HTTP/0.9 "GET PATH" form is legal.
        self.command, self.path = words
        if self.command != "GET":
            raise _InvalidClientRequest('Expected GET method; Got command=%r; path=%r; raw=%r' % (
                self.command, self.path, raw_requestline,))
        self.request_version = "HTTP/0.9"
        # QQQ I'm pretty sure we can drop support for HTTP/0.9
    else:
        raise _InvalidClientRequest('Invalid HTTP method: %r' % (raw_requestline,))

    self.headers = self.MessageClass(self.rfile, 0)

    if self.headers.status:
        raise _InvalidClientRequest('Invalid headers status: %r' % (self.headers.status,))

    if self.headers.get("transfer-encoding", "").lower() == "chunked":
        # Chunked framing takes precedence; drop any Content-Length
        # supplied alongside it.
        try:
            del self.headers["content-length"]
        except KeyError:
            pass

    content_length = self.headers.get("content-length")
    if content_length is not None:
        # NOTE: int() may raise ValueError for a non-numeric value; that
        # propagates as the documented client-error ValueError.
        content_length = int(content_length)
        if content_length < 0:
            raise _InvalidClientRequest('Invalid Content-Length: %r' % (content_length,))

        if content_length and self.command in ('HEAD', ):
            raise _InvalidClientRequest('Unexpected Content-Length')

    self.content_length = content_length

    if self.request_version == "HTTP/1.1":
        # HTTP/1.1 defaults to keep-alive unless explicitly closed.
        conntype = self.headers.get("Connection", "").lower()
        self.close_connection = (conntype == 'close')  # pylint:disable=superfluous-parens
    elif self.request_version == 'HTTP/1.0':
        # HTTP/1.0 defaults to close unless keep-alive is requested.
        conntype = self.headers.get("Connection", "close").lower()
        self.close_connection = (conntype != 'keep-alive')  # pylint:disable=superfluous-parens
    else:
        # XXX: HTTP 0.9. We should drop support
        self.close_connection = True

    return True
# Hook so subclasses/tests can intercept reporting of internal failures.
_print_unexpected_exc = staticmethod(traceback.print_exc)

def log_error(self, msg, *args):
    """
    Write an error line for *msg* to the server's error log.

    When *args* is empty, *msg* is written as-is (it may contain literal
    ``%`` characters); otherwise it is %-formatted with *args*. Failures
    while formatting or writing are swallowed so that error reporting
    itself can never break request handling.
    """
    if args:
        try:
            message = msg % args
        except Exception:  # pylint:disable=broad-except
            self._print_unexpected_exc()
            message = '%r %r' % (msg, args)
    else:
        # Already fully formatted; re-formatting could choke on a
        # stray '%' in the message.
        message = msg
    try:
        message = '%s: %s' % (self.socket, message)
    except Exception:  # pylint:disable=broad-except
        pass

    try:
        self.server.error_log.write(message + '\n')
    except Exception:  # pylint:disable=broad-except
        self._print_unexpected_exc()
def read_requestline(self):
    """
    Read and return the HTTP request line.

    Under both Python 2 and 3, this should return the native
    ``str`` type; under Python 3, this probably means the bytes read
    from the network need to be decoded (using the ISO-8859-1 charset, aka
    latin-1).
    """
    raw = self.rfile.readline(MAX_REQUEST_LINE)
    return raw.decode('latin-1')
def handle_one_request(self):
    """
    Handles one HTTP request using ``self.socket`` and ``self.rfile``.

    Each invocation of this method will do several things, including (but not limited to):

    - Read the request line using :meth:`read_requestline`;
    - Read the rest of the request, including headers, with :meth:`read_request`;
    - Construct a new WSGI environment in ``self.environ`` using :meth:`get_environ`;
    - Store the application in ``self.application``, retrieving it from the server;
    - Handle the remainder of the request, including invoking the application,
      with :meth:`handle_one_response`

    There are several possible return values to indicate the state
    of the client connection:

    - ``None``
        The client connection is already closed or should
        be closed because the WSGI application or client set the
        ``Connection: close`` header. The request handling
        loop should terminate and perform cleanup steps.
    - (status, body)
        An HTTP status and body tuple. The request was in error,
        as detailed by the status and body. The request handling
        loop should terminate, close the connection, and perform
        cleanup steps. Note that the ``body`` is the complete contents
        to send to the client, including all headers and the initial
        status line.
    - ``True``
        The literal ``True`` value. The request was successfully handled
        and the response sent to the client by :meth:`handle_one_response`.
        The connection remains open to process more requests and the connection
        handling loop should call this method again. This is the typical return
        value.

    .. seealso:: :meth:`handle`

    .. versionchanged:: 1.1b6
       Funnel exceptions having to do with invalid HTTP requests through
       :meth:`_handle_client_error` to allow subclasses to customize. Note that
       this is experimental and may change in the future.
    """
    # pylint:disable=too-many-return-statements
    if self.rfile.closed:
        return

    try:
        self.requestline = self.read_requestline()
        # Account for old subclasses that haven't done this
        if isinstance(self.requestline, bytes):
            self.requestline = self.requestline.decode('latin-1')
    except socket.error:
        # "Connection reset by peer" or other socket errors aren't interesting here
        return

    if not self.requestline:
        # Empty read: the client closed the connection cleanly.
        return

    self.response_length = 0

    if len(self.requestline) >= MAX_REQUEST_LINE:
        return ('414', _REQUEST_TOO_LONG_RESPONSE)

    try:
        # for compatibility with older versions of pywsgi, we pass self.requestline as an argument there
        # NOTE: read_request is supposed to raise ValueError on invalid input; allow old
        # subclasses that return a False value instead.
        # NOTE: This can mutate the value of self.headers, so self.get_environ() must not be
        # called until AFTER this call is done.
        if not self.read_request(self.requestline):
            return ('400', _BAD_REQUEST_RESPONSE)
    except Exception as ex:  # pylint:disable=broad-except
        # Notice we don't use self.handle_error because it reports
        # a 500 error to the client, and this is almost certainly
        # a client error.
        # Provide a hook for subclasses.
        return self._handle_client_error(ex)

    self.environ = self.get_environ()
    self.application = self.server.application

    self.handle_one_response()

    if self.close_connection:
        return

    if self.rfile.closed:
        return

    return True  # read more requests
def _connection_upgrade_requested(self):
    """
    Return whether the client asked to switch protocols.

    True when either a ``Connection: upgrade`` or an
    ``Upgrade: websocket`` header is present (compared
    case-insensitively).
    """
    if self.headers.get('Connection', '').lower() == 'upgrade':
        return True
    return self.headers.get('Upgrade', '').lower() == 'websocket'
def finalize_headers(self):
    """
    Add the framing headers the server is responsible for.

    Appends a ``Date`` header unless the application provided one,
    records whether this is a 101 protocol switch, and — for responses
    that include a message-body — ensures either an exact
    ``Content-Length`` (when the result's total size is knowable) or
    chunked transfer encoding.
    """
    if self.provided_date is None:
        self.response_headers.append((b'Date', format_date_time(time.time())))

    # 101 Switching Protocols means the connection is being upgraded.
    self.connection_upgraded = self.code == 101

    if self.code in (304, 204):
        # These statuses carry no message-body; nothing to frame.
        return
    if self.provided_content_length is not None:
        # The application already declared the body length.
        return

    if hasattr(self.result, '__len__'):
        # Finite, sized result: compute an exact Content-Length.
        body_size = sum(len(chunk) for chunk in self.result)
        self.response_headers.append(
            (b'Content-Length', str(body_size).encode("latin-1")))
    else:
        # Unsized iterator: fall back to chunked encoding where the
        # protocol allows it (not HTTP/1.0, not an upgraded connection).
        self.response_use_chunked = (
            not self.connection_upgraded
            and self.request_version != 'HTTP/1.0'
        )
        if self.response_use_chunked:
            self.response_headers.append((b'Transfer-Encoding', b'chunked'))
def _sendall(self, data):
    """
    Send *data* on the client socket, accumulating ``response_length``.

    On :exc:`socket.error`, records the failure message in
    ``self.status`` and negates ``self.code`` before re-raising.
    """
    try:
        self.socket.sendall(data)
    except socket.error as ex:
        self.status = 'socket error: %s' % ex
        if self.code > 0:
            # A negative code marks a response aborted mid-send.
            self.code = -self.code
        raise
    self.response_length += len(data)
def _write(self, data,
           _bytearray=bytearray):
    """
    Write one piece of response body, applying chunk framing if needed.

    Empty byte strings — which applications are allowed to yield —
    are silently discarded. Under chunked transfer encoding the data
    is framed as ``<size-hex>\\r\\n<data>\\r\\n`` and assembled into a
    single buffer so the whole chunk goes out in one sendall call.
    """
    if not data:
        return
    if not self.response_use_chunked:
        self._sendall(data)
        return
    # size line + payload + trailer, built in one mutable buffer.
    chunk = _bytearray(b'%x\r\n' % len(data))
    chunk += data
    chunk += b'\r\n'
    self._sendall(chunk)
# Exception type raised when the application violates the WSGI contract.
ApplicationError = AssertionError

def write(self, data):
    """
    The write() callable we return from start_response.

    https://www.python.org/dev/peps/pep-3333/#the-write-callable
    Supposed to do pretty much the same thing as yielding values
    from the application's return.

    :raises ApplicationError: if a body is written for a 204/304
        response, or if called before ``start_response``.
    """
    if data and self.code in (304, 204):
        raise self.ApplicationError('The %s response must have no body' % self.code)

    if not self.headers_sent:
        if not self.status:
            raise self.ApplicationError("The application did not call start_response()")
        self._write_with_headers(data)
    else:
        self._write(data)
def _write_with_headers(self, data):
    """
    Send the status line plus accumulated headers, then *data*.

    Marks the headers as sent and finalizes framing headers first.
    ``self.status`` and ``self.response_headers`` are already latin-1
    encoded by :meth:`start_response`.
    """
    self.headers_sent = True
    self.finalize_headers()

    head = bytearray(b'HTTP/1.1 ')
    head += self.status
    head += b'\r\n'
    for name, value in self.response_headers:
        head += name + b': ' + value + b"\r\n"
    head += b'\r\n'
    self._sendall(head)
    # The body is deliberately NOT folded into the header buffer: the
    # copy could cost substantial time for a large body, and keeping it
    # separate still usually lets sendall push everything in one go.
    self._write(data)
def start_response(self, status, headers, exc_info=None):
    """
    The PEP 3333 ``start_response`` callable handed to the application.

    Validates and latin-1-encodes the status line and headers, records
    header values the server cares about (Connection, Date,
    Content-Length), and returns the :meth:`write` callable.

    .. versionchanged:: 1.2a1
       Avoid HTTP header injection by raising a :exc:`ValueError`
       if *status* or any *header* name or value contains a carriage
       return or newline.

    .. versionchanged:: 1.1b5
       Pro-actively handle checking the encoding of the status line
       and headers during this method. On Python 2, avoid some
       extra encodings.
    """
    # pylint:disable=too-many-branches,too-many-statements
    if exc_info:
        try:
            if self.headers_sent:
                # Re-raise original exception if headers sent
                reraise(*exc_info)
        finally:
            # Avoid dangling circular ref
            exc_info = None

    # Pep 3333, "The start_response callable":
    # https://www.python.org/dev/peps/pep-3333/#the-start-response-callable
    # "Servers should check for errors in the headers at the time
    # start_response is called, so that an error can be raised
    # while the application is still running." Here, we check the encoding.
    # This aids debugging: headers especially are generated programmatically
    # and an encoding error in a loop or list comprehension yields an opaque
    # UnicodeError without any clue which header was wrong.
    # Note that this results in copying the header list at this point, not modifying it,
    # although we are allowed to do so if needed. This slightly increases memory usage.
    # We also check for HTTP Response Splitting vulnerabilities
    response_headers = []
    header = None
    value = None
    try:
        for header, value in headers:
            if not isinstance(header, str):
                raise UnicodeError("The header must be a native string", header, value)
            if not isinstance(value, str):
                raise UnicodeError("The value must be a native string", header, value)
            if '\r' in header or '\n' in header:
                raise ValueError('carriage return or newline in header name', header)
            if '\r' in value or '\n' in value:
                raise ValueError('carriage return or newline in header value', value)
            # Either we're on Python 2, in which case bytes is correct, or
            # we're on Python 3 and the user screwed up (because it should be a native
            # string). In either case, make sure that this is latin-1 compatible. Under
            # Python 2, bytes.encode() will take a round-trip through the system encoding,
            # which may be ascii, which is not really what we want. However, the latin-1 encoding
            # can encode everything except control characters and the block from 0x7F to 0x9F, so
            # explicitly round-tripping bytes through the encoding is unlikely to be of much
            # benefit, so we go for speed (the WSGI spec specifically calls out allowing the range
            # from 0x00 to 0xFF, although the HTTP spec forbids the control characters).
            # Note: Some Python 2 implementations, like Jython, may allow non-octet (above 255) values
            # in their str implementation; this is mentioned in the WSGI spec, but we don't
            # run on any platform like that so we can assume that a str value is pure bytes.
            response_headers.append((header.encode("latin-1"),
                                     value.encode("latin-1")))
    except UnicodeEncodeError:
        # If we get here, we're guaranteed to have a header and value
        raise UnicodeError("Non-latin1 header", repr(header), repr(value))

    # Same as above
    if not isinstance(status, str):
        raise UnicodeError("The status string must be a native string")
    if '\r' in status or '\n' in status:
        raise ValueError("carriage return or newline in status", status)
    # don't assign to anything until the validation is complete, including parsing the
    # code
    code = int(status.split(' ', 1)[0])

    self.status = status.encode("latin-1")
    self._orig_status = status  # Preserve the native string for logging
    self.response_headers = response_headers
    self.code = code

    provided_connection = None  # Did the wsgi app give us a Connection header?
    self.provided_date = None
    self.provided_content_length = None

    # Remember the application-supplied values finalize_headers() needs.
    for header, value in headers:
        header = header.lower()
        if header == 'connection':
            provided_connection = value
        elif header == 'date':
            self.provided_date = value
        elif header == 'content-length':
            self.provided_content_length = value

    if self.request_version == 'HTTP/1.0' and provided_connection is None:
        # HTTP/1.0 with no explicit Connection header: advertise our
        # keep-alive decision explicitly.
        conntype = b'close' if self.close_connection else b'keep-alive'
        response_headers.append((b'Connection', conntype))
    elif provided_connection == 'close':
        self.close_connection = True

    if self.code in (304, 204):
        # These statuses must not carry a body; a nonzero declared
        # length is an application error.
        if self.provided_content_length is not None and self.provided_content_length != '0':
            msg = 'Invalid Content-Length for %s response: %r (must be absent or zero)' % (self.code, self.provided_content_length)
            msg = msg.encode('latin-1')
            raise self.ApplicationError(msg)

    return self.write
def log_request(self):
    """Write one access-log line (see :meth:`format_request`) to the server log."""
    line = self.format_request()
    self.server.log.write(line + '\n')
def format_request(self):
    """
    Return one access-log line describing this request.

    Fields: client address, timestamp (second resolution), request
    line, status code, response length and wall-clock duration —
    with ``-`` standing in for any unavailable field.
    """
    stamp = datetime.now().replace(microsecond=0)
    duration = '%.6f' % (self.time_finish - self.time_start) if self.time_finish else '-'
    address = self.client_address
    if isinstance(address, tuple):
        address = address[0]
    # Prefer the preserved native-string status (saved so we don't have
    # to decode); fall back to the encoded status for subclasses that
    # bypass start_response.
    status = (self._orig_status or self.status or '000').split()[0]
    return '%s - - [%s] "%s" %s %s %s' % (
        address or '-',
        stamp,
        self.requestline or '',
        status,
        self.response_length or '-',
        duration,
    )
def process_result(self):
    """
    Iterate the application's result, writing each non-empty chunk.

    If the application produced no output at all (and never used the
    write callable), an empty write is issued to flush the headers.
    A chunked response is terminated with the zero-length chunk.
    """
    for chunk in self.result:
        if not chunk:
            continue
        self.write(chunk)
    if self.status and not self.headers_sent:
        # Empty result iterable and no write() calls: force headers out.
        self.write(b'')
    if self.response_use_chunked:
        self._sendall(b'0\r\n\r\n')
def run_application(self):
    """
    Invoke the WSGI application and send its result.

    Stores the application's return value in ``self.result``, feeds it
    through :meth:`process_result`, and always closes the result
    afterwards (as the WSGI spec requires) before discarding it.
    """
    assert self.result is None
    try:
        self.result = self.application(self.environ, self.start_response)
        self.process_result()
    finally:
        close = getattr(self.result, 'close', None)
        try:
            if close is not None:
                close()
        finally:
            # Discard the result. If it's a generator this can
            # free a lot of hidden resources (if we failed to iterate
            # all the way through it---the frames are automatically
            # cleaned up when StopIteration is raised); but other cases
            # could still free up resources sooner than otherwise.
            close = None
            self.result = None
#: These errors are silently ignored by :meth:`handle_one_response` to avoid producing
#: excess log entries on normal operating conditions. They indicate
#: a remote client has disconnected and there is little or nothing
#: this process can be expected to do about it. You may change this
#: value in a subclass.
#:
#: The default value includes :data:`errno.EPIPE` and :data:`errno.ECONNRESET`.
#: On Windows this also includes :data:`errno.WSAECONNABORTED`.
#:
#: This is a provisional API, subject to change. See :pr:`377`, :pr:`999`
#: and :issue:`136`.
#:
#: .. versionadded:: 1.3
ignored_socket_errors = (errno.EPIPE, errno.ECONNRESET)
try:
    # WSAECONNABORTED only exists on Windows; elsewhere the attribute
    # access raises AttributeError and the tuple stays as-is.
    ignored_socket_errors += (errno.WSAECONNABORTED,)
except AttributeError:
    pass  # Not windows
def handle_one_response(self):
    """
    Invoke the application to produce one response.

    This is called by :meth:`handle_one_request` after all the
    state for the request has been established. It is responsible
    for error handling.
    """
    self.time_start = time.time()
    self.status = None
    self.headers_sent = False

    # Reset all per-response state before touching the application.
    self.result = None
    self.response_use_chunked = False
    self.connection_upgraded = False
    self.response_length = 0

    try:
        try:
            self.run_application()
        finally:
            try:
                # Drain any request body the application didn't read so
                # the connection can be reused for the next request.
                self.wsgi_input._discard()
            except _InvalidClientInput:
                # This one is deliberately raised to the outer
                # scope, because, with the incoming stream in some bad state,
                # we can't be sure we can synchronize and properly parse the next
                # request.
                raise
            except socket.error:
                # Don't let socket exceptions during discarding
                # input override any exception that may have been
                # raised by the application, such as our own _InvalidClientInput.
                # In the general case, these aren't even worth logging (see the comment
                # just below)
                pass
    except _InvalidClientInput as ex:
        # DO log this one because:
        # - Some of the data may have been read and acted on by the
        #   application;
        # - The response may or may not have been sent;
        # - It's likely that the client is bad, or malicious, and
        #   users might wish to take steps to block the client.
        self._handle_client_error(ex)
        self.close_connection = True
        self._send_error_response_if_possible(400)
    except socket.error as ex:
        if ex.args[0] in self.ignored_socket_errors:
            # See description of self.ignored_socket_errors.
            self.close_connection = True
        else:
            self.handle_error(*sys.exc_info())
    except:  # pylint:disable=bare-except
        self.handle_error(*sys.exc_info())
    finally:
        self.time_finish = time.time()
        self.log_request()
def _send_error_response_if_possible(self, error_code):
    """
    Try to send the canned response for *error_code* (e.g. 400, 500).

    If part of a response already went out we cannot send a clean
    error page, so the connection is simply marked to be closed.
    Socket failures while sending likewise just close the connection.
    """
    if self.response_length:
        self.close_connection = True
        return
    status, headers, body = _ERRORS[error_code]
    try:
        # Copy the header list: start_response may append to it.
        self.start_response(status, headers[:])
        self.write(body)
    except socket.error:
        self.close_connection = True
def _log_error(self, t, v, tb):
    """Report an unexpected exception triple to the hub's error handler."""
    # TODO: Shouldn't we dump this to wsgi.errors? If we did that now, it would
    # wind up getting logged twice
    if not issubclass(t, GreenletExit):
        # NOTE(review): wrapping the environ in secure_environ_class
        # before handing it to the error handler presumably redacts
        # sensitive values from the logged context — confirm.
        context = self.environ
        if not isinstance(context, self.server.secure_environ_class):
            context = self.server.secure_environ_class(context)
        self.server.loop.handle_error(context, t, v, tb)
def handle_error(self, t, v, tb):
    """
    Handle an internal, unexpected error (NOT invalid client input).

    Logs the exception via :meth:`_log_error`, then attempts to send
    a 500 response if nothing has been written yet.
    """
    self._log_error(t, v, tb)
    # Drop the traceback references to break any reference cycles
    # before doing further work.
    del t, v, tb
    self._send_error_response_if_possible(500)
def _handle_client_error(self, ex):
    """
    Log an error caused by invalid client input and return the
    ``('400', body)`` tuple to send as the response.
    """
    # Called for invalid client input
    # Returns the appropriate error response.
    if not isinstance(ex, (ValueError, _InvalidClientInput)):
        # Unexpected type: dump the full traceback as well.
        # XXX: Why not self._log_error to send it through the loop's
        # handle_error method?
        # _InvalidClientRequest is a ValueError; _InvalidClientInput is an IOError.
        traceback.print_exc()
    if isinstance(ex, _InvalidClientRequest):
        # No formatting needed, that's already been handled. In fact, because the
        # formatted message contains user input, it might have a % in it, and attempting
        # to format that with no arguments would be an error.
        # However, the error messages do not include the requesting IP
        # necessarily, so we do add that.
        self.log_error('(from %s) %s', self.client_address, ex.formatted_message)
    else:
        self.log_error('Invalid request (from %s): %s',
                       self.client_address,
                       str(ex) or ex.__class__.__name__)
    return ('400', _BAD_REQUEST_RESPONSE)
def _headers(self):
    """
    Yield ``('HTTP_<NAME>', value)`` pairs for the parsed request headers.

    Handles folded continuation lines (lines beginning with a space or
    tab extend the previous header's value). Header names containing
    an underscore are dropped entirely — presumably to avoid ambiguity
    with the dash-to-underscore translation; confirm. CONTENT_TYPE and
    CONTENT_LENGTH are skipped here because they get dedicated,
    non-``HTTP_``-prefixed environ keys.
    """
    key = None
    value = None
    IGNORED_KEYS = (None, 'CONTENT_TYPE', 'CONTENT_LENGTH')
    for header in self.headers.headers:
        if key is not None and header[:1] in " \t":
            # Folded continuation line: extend the previous value.
            value += header
            continue

        # A new header starts; flush the previously accumulated one.
        if key not in IGNORED_KEYS:
            yield 'HTTP_' + key, value.strip()

        key, value = header.split(':', 1)
        if '_' in key:
            # strip incoming bad headers
            key = None
        else:
            key = key.replace('-', '_').upper()

    # Flush the final header (the loop only yields on the *next* line).
    if key not in IGNORED_KEYS:
        yield 'HTTP_' + key, value.strip()
def get_environ(self):
    """
    Construct and return a new WSGI environment dictionary for a specific request.

    This should begin with asking the server for the base environment
    using :meth:`WSGIServer.get_environ`, and then proceed to add the
    request specific values.

    By the time this method is invoked the request line and request shall have
    been parsed and ``self.headers`` shall be populated.
    """
    env = self.server.get_environ()
    env['REQUEST_METHOD'] = self.command

    # SCRIPT_NAME is explicitly implementation defined. Using an
    # empty value for SCRIPT_NAME is both explicitly allowed by
    # both the CGI standard and WSGI PEPs, and also the thing that
    # makes the most sense from a generic server perspective (we
    # have no hierarchy or understanding of URLs or files, just a
    # single application to call. The empty string represents the
    # application root, which is what we have). Different WSGI
    # implementations handle this very differently, so portable
    # applications that rely on SCRIPT_NAME will have to use a
    # WSGI middleware to set it to a defined value, or otherwise
    # rely on server-specific mechanisms (e.g, on waitress, use
    # ``--url-prefix``, in gunicorn set the ``SCRIPT_NAME`` header
    # or process environment variable, in gevent subclass
    # WSGIHandler.)
    #
    # See https://github.com/gevent/gevent/issues/1667 for discussion.
    env['SCRIPT_NAME'] = ''

    path, query = self.path.split('?', 1) if '?' in self.path else (self.path, '')
    # Note that self.path contains the original str object; if it contains
    # encoded escapes, it will NOT match PATH_INFO.
    env['PATH_INFO'] = unquote_latin1(path)
    env['QUERY_STRING'] = query

    if self.headers.typeheader is not None:
        env['CONTENT_TYPE'] = self.headers.typeheader

    length = self.headers.getheader('content-length')
    if length:
        env['CONTENT_LENGTH'] = length
    env['SERVER_PROTOCOL'] = self.request_version

    client_address = self.client_address
    if isinstance(client_address, tuple):
        env['REMOTE_ADDR'] = str(client_address[0])
        env['REMOTE_PORT'] = str(client_address[1])

    for key, value in self._headers():
        if key in env:
            # Duplicate header names are joined: cookies with '; ',
            # everything else with ','.
            if 'COOKIE' in key:
                env[key] += '; ' + value
            else:
                env[key] += ',' + value
        else:
            env[key] = value

    sock = self.socket if env.get('HTTP_EXPECT') == '100-continue' else None
    chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'

    # Input refuses to read if the data isn't chunked, and there is no content_length
    # provided. For 'Upgrade: Websocket' requests, neither of those things is true.
    handling_reads = not self._connection_upgrade_requested()
    self.wsgi_input = Input(self.rfile, self.content_length, socket=sock, chunked_input=chunked)
    env['wsgi.input'] = self.wsgi_input if handling_reads else self.rfile
    # This is a non-standard flag indicating that our input stream is
    # self-terminated (returns EOF when consumed).
    # See https://github.com/gevent/gevent/issues/1308
    env['wsgi.input_terminated'] = handling_reads
    return env
| WSGIHandler |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 9215,
"end": 9642
} | class ____(ProxyModelBase):
field2 = models.CharField(max_length=30)
# test bad field name
# class TestBadFieldModel(ShowFieldType, PolymorphicModel):
# instance_of = models.CharField(max_length=30)
# validation error: "polymorphic.relatednameclash: Accessor for field 'polymorphic_ctype' clashes
# with related field 'ContentType.relatednameclash_set'." (reported by Andrew Ingram)
# fixed with related_name
| ProxyModelB |
python | django__django | django/db/models/fields/json.py | {
"start": 5782,
"end": 6395
} | class ____(FieldGetDbPrepValueMixin, PostgresOperatorLookup):
lookup_name = "contains"
postgres_operator = "@>"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contains lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params
| DataContains |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 15497,
"end": 15662
} | class ____(TicketActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.GITHUB_ENTERPRISE
| GithubEnterpriseActionTranslator |
python | tensorflow__tensorflow | tensorflow/compiler/tests/lstm_test.py | {
"start": 10381,
"end": 13363
} | class ____(test.Benchmark):
"""Mcro-benchmarks for a single layer of LSTM cells."""
def _LayerBuilder(self, do_training):
out_seq, weights = lstm.BuildLSTMLayer(FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes)
name, fetches = ('lstm_layer_inference', out_seq)
if do_training:
# Not a real loss function, but good enough for benchmarking backprop.
loss = math_ops.reduce_sum(math_ops.add_n(out_seq))
dw = gradients_impl.gradients(loss, weights)
name, fetches = ('lstm_layer_training', dw)
_DumpGraph(ops.get_default_graph(),
'%s_%d_%d_%d_%d' % (name, FLAGS.batch_size, FLAGS.seq_length,
FLAGS.num_inputs, FLAGS.num_nodes))
return name, fetches
def benchmarkLayerInference(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), False,
FLAGS.device)
def benchmarkLayerInferenceXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(False), True,
FLAGS.device)
def benchmarkLayerTraining(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), False,
FLAGS.device)
def benchmarkLayerTrainingXLA(self):
xla_test.Benchmark(self, lambda: self._LayerBuilder(True), True,
FLAGS.device)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--batch_size',
type=int,
default=128,
help="""\
Inputs are fed in batches of this size, for both inference and training.
Larger values cause the matmul in each LSTM cell to have higher
dimensionality.\
"""
)
parser.add_argument(
'--seq_length',
type=int,
default=60,
help="""\
Length of the unrolled sequence of LSTM cells in a layer.Larger values
cause more LSTM matmuls to be run.\
"""
)
parser.add_argument(
'--num_inputs',
type=int,
default=1024,
help='Dimension of inputs that are fed into each LSTM cell.'
)
parser.add_argument(
'--num_nodes',
type=int,
default=1024,
help='Number of nodes in each LSTM cell.'
)
parser.add_argument(
'--device',
type=str,
default='gpu',
help="""\
TensorFlow device to assign ops to, e.g. "gpu", "cpu". For details see
documentation for tf.Graph.device.\
"""
)
parser.add_argument(
'--dump_graph_dir',
type=str,
default='',
help='If non-empty, dump graphs in *.pbtxt format to this directory.'
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
# This test is using Tensorflow sessions which are not compatible with eager
# mode.
ops.disable_eager_execution()
test.main(argv=[sys.argv[0]] + unparsed)
| LSTMBenchmark |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/unit_tests/integration/api/bulk.py | {
"start": 8154,
"end": 9289
} | class ____:
def __init__(self) -> None:
self._records = []
def _any_record(self, updated_at: str = "2024-05-05T01:09:50Z") -> str:
an_id = str(randint(1000000000000, 9999999999999))
a_parent_id = str(randint(1000000000000, 9999999999999))
return f"""{{"__typename":"Customer","id":"gid://shopify/Customer/{a_parent_id}","defaultAddress":{{"id":"gid://shopify/MailingAddress/{an_id}?model_name=CustomerAddress"}},"addresses":[{{"address1":"7162JakobHaven","address2":null,"city":null,"country":"Canada","countryCode":"CA","company":null,"firstName":"Test","id":"gid://shopify/MailingAddress/{an_id}?model_name=CustomerAddress","lastName":"Test","name":"TestTest","phone":"555555","province":null,"provinceCode":null,"zip":null}}],"updatedAt":"{updated_at}"}}
"""
def with_record(self, updated_at: str = "2024-05-05T01:09:50Z") -> "MetafieldOrdersJobResponseBuilder":
self._records.append(self._any_record(updated_at=updated_at))
return self
def build(self) -> HttpResponse:
return HttpResponse("".join(self._records), status_code=200)
| CustomerAddressResponseBuilder |
python | PrefectHQ__prefect | tests/deployment/test_steps.py | {
"start": 44233,
"end": 44486
} | class ____:
def __init__(self, returncode=0):
self.returncode = returncode
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
pass
async def wait(self):
pass
| MockProcess |
python | django__django | tests/admin_changelist/models.py | {
"start": 1672,
"end": 1858
} | class ____(models.Model):
music = models.ForeignKey(Musician, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
role = models.CharField(max_length=15)
| Membership |
python | ray-project__ray | python/ray/tune/search/zoopt/zoopt_search.py | {
"start": 695,
"end": 12441
} | class ____(Searcher):
"""A wrapper around ZOOpt to provide trial suggestions.
ZOOptSearch is a library for derivative-free optimization. It is backed by
the `ZOOpt <https://github.com/polixir/ZOOpt>`__ package. Currently,
Asynchronous Sequential RAndomized COordinate Shrinking (ASRacos)
is implemented in Tune.
To use ZOOptSearch, install zoopt (>=0.4.1): ``pip install -U zoopt``.
Tune automatically converts search spaces to ZOOpt"s format:
.. code-block:: python
from ray import tune
from ray.tune.search.zoopt import ZOOptSearch
"config": {
"iterations": 10, # evaluation times
"width": tune.uniform(-10, 10),
"height": tune.uniform(-10, 10)
}
zoopt_search_config = {
"parallel_num": 8, # how many workers to parallel
}
zoopt_search = ZOOptSearch(
algo="Asracos", # only support Asracos currently
budget=20, # must match `num_samples` in `tune.TuneConfig()`.
dim_dict=dim_dict,
metric="mean_loss",
mode="min",
**zoopt_search_config
)
tuner = tune.Tuner(
my_objective,
tune_config=tune.TuneConfig(
search_alg=zoopt_search,
num_samples=20
),
run_config=tune.RunConfig(
name="zoopt_search",
stop={"timesteps_total": 10}
),
param_space=config
)
tuner.fit()
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray import tune
from ray.tune.search.zoopt import ZOOptSearch
from zoopt import ValueType
dim_dict = {
"height": (ValueType.CONTINUOUS, [-10, 10], 1e-2),
"width": (ValueType.DISCRETE, [-10, 10], False),
"layers": (ValueType.GRID, [4, 8, 16])
}
"config": {
"iterations": 10, # evaluation times
}
zoopt_search_config = {
"parallel_num": 8, # how many workers to parallel
}
zoopt_search = ZOOptSearch(
algo="Asracos", # only support Asracos currently
budget=20, # must match `num_samples` in `tune.TuneConfig()`.
dim_dict=dim_dict,
metric="mean_loss",
mode="min",
**zoopt_search_config
)
tuner = tune.Tuner(
my_objective,
tune_config=tune.TuneConfig(
search_alg=zoopt_search,
num_samples=20
),
run_config=tune.RunConfig(
name="zoopt_search",
stop={"timesteps_total": 10}
),
)
tuner.fit()
Parameters:
algo: To specify an algorithm in zoopt you want to use.
Only support ASRacos currently.
budget: Number of samples.
dim_dict: Dimension dictionary.
For continuous dimensions: (continuous, search_range, precision);
For discrete dimensions: (discrete, search_range, has_order);
For grid dimensions: (grid, grid_list).
More details can be found in zoopt package.
metric: The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate: Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
parallel_num: How many workers to parallel. Note that initial
phase may start less workers than this number. More details can
be found in zoopt package.
"""
optimizer = None
def __init__(
self,
algo: str = "asracos",
budget: Optional[int] = None,
dim_dict: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
parallel_num: int = 1,
**kwargs
):
assert (
zoopt is not None
), "ZOOpt not found - please install zoopt by `pip install -U zoopt`."
assert budget is not None, "`budget` should not be None!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
_algo = algo.lower()
assert _algo in [
"asracos",
"sracos",
], "`algo` must be in ['asracos', 'sracos'] currently"
self._algo = _algo
if isinstance(dim_dict, dict) and dim_dict:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(dim_dict)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(par="dim_dict", cls=type(self))
)
dim_dict = self.convert_search_space(dim_dict, join=True)
self._dim_dict = dim_dict
self._budget = budget
self._metric = metric
if mode == "max":
self._metric_op = -1.0
elif mode == "min":
self._metric_op = 1.0
self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
self._live_trial_mapping = {}
self._dim_keys = []
self.solution_dict = {}
self.best_solution_list = []
self.optimizer = None
self.kwargs = kwargs
self.parallel_num = parallel_num
super(ZOOptSearch, self).__init__(metric=self._metric, mode=mode)
if self._dim_dict:
self._setup_zoopt()
def _setup_zoopt(self):
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
_dim_list = []
for k in self._dim_dict:
self._dim_keys.append(k)
_dim_list.append(self._dim_dict[k])
init_samples = None
if self._points_to_evaluate:
logger.warning(
"`points_to_evaluate` is ignored by ZOOpt in versions <= 0.4.1."
)
init_samples = [
Solution(x=tuple(point[dim] for dim in self._dim_keys))
for point in self._points_to_evaluate
]
dim = zoopt.Dimension2(_dim_list)
par = zoopt.Parameter(budget=self._budget, init_samples=init_samples)
if self._algo == "sracos" or self._algo == "asracos":
from zoopt.algos.opt_algorithms.racos.sracos import SRacosTune
self.optimizer = SRacosTune(
dimension=dim,
parameter=par,
parallel_num=self.parallel_num,
**self.kwargs
)
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
) -> bool:
if self._dim_dict:
return False
space = self.convert_search_space(config)
self._dim_dict = space
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = -1.0
elif self._mode == "min":
self._metric_op = 1.0
self._setup_zoopt()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._dim_dict or not self.optimizer:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="dim_dict"
)
)
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__, metric=self._metric, mode=self._mode
)
)
_solution = self.optimizer.suggest()
if _solution == "FINISHED":
if ray.__version__ >= "0.8.7":
return Searcher.FINISHED
else:
return None
if _solution:
self.solution_dict[str(trial_id)] = _solution
_x = _solution.get_x()
new_trial = dict(zip(self._dim_keys, _x))
self._live_trial_mapping[trial_id] = new_trial
return unflatten_dict(new_trial)
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
):
"""Notification for the completion of trial."""
if result:
_solution = self.solution_dict[str(trial_id)]
_best_solution_so_far = self.optimizer.complete(
_solution, self._metric_op * result[self._metric]
)
if _best_solution_so_far:
self.best_solution_list.append(_best_solution_so_far)
del self._live_trial_mapping[trial_id]
def save(self, checkpoint_path: str):
save_object = self.__dict__
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
self.__dict__.update(save_object)
@staticmethod
def convert_search_space(spec: Dict, join: bool = False) -> Dict[str, Tuple]:
spec = copy.deepcopy(spec)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a ZOOpt search space."
)
def resolve_value(domain: Domain) -> Tuple:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(domain, Float):
precision = quantize or 1e-12
if isinstance(sampler, Uniform):
return (
ValueType.CONTINUOUS,
[domain.lower, domain.upper],
precision,
)
elif isinstance(domain, Integer):
if isinstance(sampler, Uniform):
return (ValueType.DISCRETE, [domain.lower, domain.upper - 1], True)
elif isinstance(domain, Categorical):
# Categorical variables would use ValueType.DISCRETE with
# has_partial_order=False, however, currently we do not
# keep track of category values and cannot automatically
# translate back and forth between them.
if isinstance(sampler, Uniform):
return (ValueType.GRID, domain.categories)
raise ValueError(
"ZOOpt does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__, type(domain.sampler).__name__
)
)
conv_spec = {
"/".join(path): resolve_value(domain) for path, domain in domain_vars
}
if join:
spec.update(conv_spec)
conv_spec = spec
return conv_spec
| ZOOptSearch |
python | spyder-ide__spyder | spyder/plugins/toolbar/container.py | {
"start": 1276,
"end": 1335
} | class ____:
ShowToolbars = "show toolbars"
| ToolbarActions |
python | scikit-learn__scikit-learn | sklearn/compose/tests/test_column_transformer.py | {
"start": 1054,
"end": 1417
} | class ____(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# 1D Series -> 2D DataFrame
if hasattr(X, "to_frame"):
return X.to_frame()
# 1D array -> 2D array
if getattr(X, "ndim", 2) == 1:
return np.atleast_2d(X).T
return X
| Trans |
python | apache__airflow | airflow-core/tests/unit/utils/test_dot_renderer.py | {
"start": 1595,
"end": 10647
} | class ____:
def setup_class(self):
clear_db_dags()
def teardown_method(self):
clear_db_dags()
def test_should_render_dag_dependencies(self):
dag_dep_1 = DagDependency(
source="dag_one",
target="dag_two",
label="task_1",
dependency_type="sensor",
dependency_id="task_1",
)
dag_dep_2 = DagDependency(
source="dag_two",
target="dag_three",
label="task_2",
dependency_type="sensor",
dependency_id="task_2",
)
dag_dependency_list = [dag_dep_1, dag_dep_2]
dag_dependency_dict = {}
dag_dependency_dict["dag_one"] = dag_dependency_list
dot = dot_renderer.render_dag_dependencies(dag_dependency_dict)
assert "dag_one -> task_1" in dot.source
assert "task_1 -> dag_two" in dot.source
assert "dag_two -> task_2" in dot.source
assert "task_2 -> dag_three" in dot.source
def test_should_render_dag(self):
with DAG(dag_id="DAG_ID", schedule=None) as dag:
task_1 = BashOperator(start_date=START_DATE, task_id="first", bash_command="echo 1")
task_2 = BashOperator(start_date=START_DATE, task_id="second", bash_command="echo 1")
task_3 = PythonOperator(start_date=START_DATE, task_id="third", python_callable=mock.MagicMock())
task_1 >> task_2
task_1 >> task_3
dot = dot_renderer.render_dag(dag)
source = dot.source
# Should render DAG title
assert "label=DAG_ID" in source
assert "first" in source
assert "second" in source
assert "third" in source
assert "first -> second" in source
assert "first -> third" in source
assert 'fillcolor="#f0ede4"' in source
assert 'fillcolor="#f0ede4"' in source
def test_should_render_dag_with_task_instances(self, session, dag_maker):
with dag_maker(dag_id="DAG_ID", session=session) as dag:
task_1 = BashOperator(start_date=START_DATE, task_id="first", bash_command="echo 1")
task_2 = BashOperator(start_date=START_DATE, task_id="second", bash_command="echo 1")
task_3 = PythonOperator(start_date=START_DATE, task_id="third", python_callable=mock.MagicMock())
task_1 >> task_2
task_1 >> task_3
tis = {ti.task_id: ti for ti in dag_maker.create_dagrun(logical_date=START_DATE).task_instances}
tis["first"].state = State.SCHEDULED
tis["second"].state = State.SUCCESS
tis["third"].state = State.RUNNING
dot = dot_renderer.render_dag(dag, tis=tis.values())
source = dot.source
# Should render DAG title
assert "label=DAG_ID" in source
assert (
'first [color=black fillcolor=tan label=first shape=rectangle style="filled,rounded"]' in source
)
assert (
'second [color=white fillcolor=green label=second shape=rectangle style="filled,rounded"]'
in source
)
assert (
'third [color=black fillcolor=lime label=third shape=rectangle style="filled,rounded"]' in source
)
def test_should_render_dag_with_mapped_operator(self, session, dag_maker):
with dag_maker(dag_id="DAG_ID", session=session) as dag:
BashOperator.partial(task_id="first", task_display_name="First Task").expand(
bash_command=["echo hello", "echo world"]
)
dot = dot_renderer.render_dag(dag)
source = dot.source
# Should render DAG title
assert "label=DAG_ID" in source
assert (
'first [color="#000000" fillcolor="#f0ede4" label="First Task" shape=rectangle style="filled,rounded"]'
in source
)
def test_render_task_group(self):
with DAG(dag_id="example_task_group", schedule=None, start_date=START_DATE) as dag:
start = EmptyOperator(task_id="start")
with TaskGroup("section_1", tooltip="Tasks for section_1") as section_1:
task_1 = EmptyOperator(task_id="task_1")
task_2 = BashOperator(task_id="task_2", bash_command="echo 1")
task_3 = EmptyOperator(task_id="task_3")
task_1 >> [task_2, task_3]
with TaskGroup("section_2", tooltip="Tasks for section_2") as section_2:
task_1 = EmptyOperator(task_id="task_1")
with TaskGroup("inner_section_2", tooltip="Tasks for inner_section2"):
task_2 = BashOperator(task_id="task_2", bash_command="echo 1")
task_3 = EmptyOperator(task_id="task_3")
task_4 = EmptyOperator(task_id="task_4")
[task_2, task_3] >> task_4
end = EmptyOperator(task_id="end")
start >> section_1 >> section_2 >> end
dot = dot_renderer.render_dag(dag)
assert dot.source.strip() == "\n".join(
[
"digraph example_task_group {",
"\tgraph [label=example_task_group labelloc=t rankdir=LR]",
'\tend [color="#000000" fillcolor="#e8f7e4" label=end shape=rectangle '
'style="filled,rounded"]',
"\tsubgraph cluster_section_1 {",
'\t\tcolor="#000000" fillcolor="#6495ed7f" label=section_1 shape=rectangle style=filled',
'\t\t"section_1.upstream_join_id" [color="#000000" fillcolor=CornflowerBlue height=0.2 '
'label="" shape=circle style="filled,rounded" width=0.2]',
'\t\t"section_1.downstream_join_id" [color="#000000" fillcolor=CornflowerBlue height=0.2 '
'label="" shape=circle style="filled,rounded" width=0.2]',
'\t\t"section_1.task_1" [color="#000000" fillcolor="#e8f7e4" label=task_1 shape=rectangle '
'style="filled,rounded"]',
'\t\t"section_1.task_2" [color="#000000" fillcolor="#f0ede4" label=task_2 shape=rectangle '
'style="filled,rounded"]',
'\t\t"section_1.task_3" [color="#000000" fillcolor="#e8f7e4" label=task_3 shape=rectangle '
'style="filled,rounded"]',
"\t}",
"\tsubgraph cluster_section_2 {",
'\t\tcolor="#000000" fillcolor="#6495ed7f" label=section_2 shape=rectangle style=filled',
'\t\t"section_2.upstream_join_id" [color="#000000" fillcolor=CornflowerBlue height=0.2 '
'label="" shape=circle style="filled,rounded" width=0.2]',
'\t\t"section_2.downstream_join_id" [color="#000000" fillcolor=CornflowerBlue height=0.2 '
'label="" shape=circle style="filled,rounded" width=0.2]',
'\t\tsubgraph "cluster_section_2.inner_section_2" {',
'\t\t\tcolor="#000000" fillcolor="#6495ed7f" label=inner_section_2 shape=rectangle '
"style=filled",
'\t\t\t"section_2.inner_section_2.task_2" [color="#000000" fillcolor="#f0ede4" label=task_2 '
'shape=rectangle style="filled,rounded"]',
'\t\t\t"section_2.inner_section_2.task_3" [color="#000000" fillcolor="#e8f7e4" label=task_3 '
'shape=rectangle style="filled,rounded"]',
'\t\t\t"section_2.inner_section_2.task_4" [color="#000000" fillcolor="#e8f7e4" label=task_4 '
'shape=rectangle style="filled,rounded"]',
"\t\t}",
'\t\t"section_2.task_1" [color="#000000" fillcolor="#e8f7e4" label=task_1 shape=rectangle '
'style="filled,rounded"]',
"\t}",
'\tstart [color="#000000" fillcolor="#e8f7e4" label=start shape=rectangle '
'style="filled,rounded"]',
'\t"section_1.downstream_join_id" -> "section_2.upstream_join_id"',
'\t"section_1.task_1" -> "section_1.task_2"',
'\t"section_1.task_1" -> "section_1.task_3"',
'\t"section_1.task_2" -> "section_1.downstream_join_id"',
'\t"section_1.task_3" -> "section_1.downstream_join_id"',
'\t"section_1.upstream_join_id" -> "section_1.task_1"',
'\t"section_2.downstream_join_id" -> end',
'\t"section_2.inner_section_2.task_2" -> "section_2.inner_section_2.task_4"',
'\t"section_2.inner_section_2.task_3" -> "section_2.inner_section_2.task_4"',
'\t"section_2.inner_section_2.task_4" -> "section_2.downstream_join_id"',
'\t"section_2.task_1" -> "section_2.downstream_join_id"',
'\t"section_2.upstream_join_id" -> "section_2.inner_section_2.task_2"',
'\t"section_2.upstream_join_id" -> "section_2.inner_section_2.task_3"',
'\t"section_2.upstream_join_id" -> "section_2.task_1"',
'\tstart -> "section_1.upstream_join_id"',
"}",
]
)
| TestDotRenderer |
python | great-expectations__great_expectations | great_expectations/metrics/metric.py | {
"start": 958,
"end": 1136
} | class ____(ValueError):
def __init__(self, metric_name: str) -> None:
super().__init__(f"Metric `{metric_name}` was not found in the registry.")
| UnregisteredMetricError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 108507,
"end": 112589
} | class ____(test.TestCase):
def setUp(self):
self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
self._predictions_idx = ((9, 4, 6, 2, 0), (5, 7, 2, 9, 6))
indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
(0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
class_labels = ((2, 7, 8), (1, 2, 5))
# Sparse vs dense labels should be handled the same.
self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
np.array(
class_labels, dtype=np.int64))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
self._test_recall_at_top_k = functools.partial(
_test_recall_at_top_k, test_case=self)
@test_util.run_deprecated_v1
def test_at_k5_nan(self):
for labels in self._labels:
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_at_k5_no_predictions(self):
for labels in self._labels:
# Class 8: 1 label, no predictions.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=8)
@test_util.run_deprecated_v1
def test_at_k5(self):
for labels in self._labels:
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 6)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=3.0 / 6)
@test_util.run_deprecated_v1
def test_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
self._test_recall_at_top_k(
self._predictions_idx, labels, k=5, expected=3.0 / 8)
| MultiLabel2dRecallAtKTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 10631,
"end": 10772
} | class ____(graphene.ObjectType):
results = non_null_list(GraphenePartition)
class Meta:
name = "Partitions"
| GraphenePartitions |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/rpc/rpc_ops.py | {
"start": 16953,
"end": 19162
} | class ____(object):
"""Class representing result and status from RPC Call."""
def __init__(self, status_or, deleter, output_specs=None):
self._status_or = status_or
self._output_specs = output_specs
self._deleter = deleter
self._error_code: dtypes.int64 = None
self._error_message: dtypes.string = None
def _check_status(self):
if self._error_code is None:
self._error_code, self._error_message = gen_rpc_ops.rpc_check_status(
self._status_or)
def __del__(self):
# Make sure the resource is deleted in the same mode as it was created in.
if context.executing_eagerly():
with context.eager_mode():
gen_rpc_ops.delete_rpc_future_resource(
handle=self._status_or, deleter=self._deleter)
else:
with context.graph_mode():
gen_rpc_ops.delete_rpc_future_resource(
handle=self._status_or, deleter=self._deleter)
def is_ok(self):
"""Returns True if RPC is successful, otherwise returns False.
This call will block for RPC result.
"""
self._check_status()
return math_ops.equal(self._error_code,
constant_op.constant(0, dtype=dtypes.int64))
def get_error(self):
"""Returns (TF Error Code, Error Message) from RPC Response.
This call will block for RPC result.
"""
self._check_status()
return self._error_code, self._error_message
def get_value(self):
"""Returns the returned response value from RPC Call when RPC is successful.
The returned value is tensors in the output_specs format as returned from
the RPC call
This call will block for RPC result.
"""
self._check_status()
if self._output_specs is None or isinstance(self._output_specs,
none_tensor.NoneTensorSpec):
flat_output_dtypes = []
return_none = True
else:
return_none = False
flat_output_dtypes = [s.dtype for s in nest.flatten(self._output_specs)]
result = gen_rpc_ops.rpc_get_value(self._status_or, Tout=flat_output_dtypes)
if return_none:
return None
else:
return nest.pack_sequence_as(self._output_specs, result)
| StatusOrResult |
python | conda__conda | conda/plugins/types.py | {
"start": 11328,
"end": 12856
} | class ____(ABC):
"""
Base class for all reporter renderers.
"""
def render(self, data: Any, **kwargs) -> str:
return str(data)
@abstractmethod
def detail_view(self, data: dict[str, str | int | bool], **kwargs) -> str:
"""
Render the output in a "tabular" format.
"""
@abstractmethod
def envs_list(
self, data: Iterable[str] | dict[str, dict[str, str | bool | None]], **kwargs
) -> str:
"""
Render a list of environments
"""
@abstractmethod
def progress_bar(
self,
description: str,
**kwargs,
) -> ProgressBarBase:
"""
Return a :class:`~conda.plugins.types.ProgressBarBase~` object to use as a progress bar
"""
@classmethod
def progress_bar_context_manager(cls) -> AbstractContextManager:
"""
Returns a null context by default but allows plugins to define their own if necessary
"""
return nullcontext()
@abstractmethod
def spinner(self, message, failed_message) -> SpinnerBase:
"""
Return a :class:`~conda.plugins.types.SpinnerBase~` object to use as a spinner (i.e.
loading dialog)
"""
@abstractmethod
def prompt(
self,
message: str = "Proceed",
choices=("yes", "no"),
default: str = "yes",
) -> str:
"""
Allows for defining an implementation of a "yes/no" confirmation function
"""
@dataclass
| ReporterRendererBase |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_period.py | {
"start": 1545,
"end": 6147
} | class ____:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
@pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")])
def test_eq_scalar(self, other, box_with_array):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
idx = tm.box_expected(idx, box_with_array)
xbox = get_upcast_box(idx, other, True)
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
result = idx == other
tm.assert_equal(result, expected)
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
pi = period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
xbox = get_upcast_box(pi, other, True)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"scalar",
[
"foo",
Timestamp("2021-01-01"),
Timedelta(days=4),
9,
9.5,
2000, # specifically don't consider 2000 to match Period("2000", "D")
False,
None,
],
)
def test_compare_invalid_scalar(self, box_with_array, scalar):
# GH#28980
# comparison with scalar that cannot be interpreted as a Period
pi = period_range("2000", periods=4)
parr = tm.box_expected(pi, box_with_array)
assert_invalid_comparison(parr, scalar, box_with_array)
@pytest.mark.parametrize(
"other",
[
pd.date_range("2000", periods=4).array,
pd.timedelta_range("1D", periods=4).array,
np.arange(4),
np.arange(4).astype(np.float64),
list(range(4)),
# match Period semantics by not treating integers as Periods
[2000, 2001, 2002, 2003],
np.arange(2000, 2004),
np.arange(2000, 2004).astype(object),
pd.Index([2000, 2001, 2002, 2003]),
],
)
def test_compare_invalid_listlike(self, box_with_array, other):
pi = period_range("2000", periods=4)
parr = tm.box_expected(pi, box_with_array)
assert_invalid_comparison(parr, other, box_with_array)
@pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
def test_compare_object_dtype(self, box_with_array, other_box):
pi = period_range("2000", periods=5)
parr = tm.box_expected(pi, box_with_array)
other = other_box(pi)
xbox = get_upcast_box(parr, other, True)
expected = np.array([True, True, True, True, True])
expected = tm.box_expected(expected, xbox)
result = parr == other
tm.assert_equal(result, expected)
result = parr <= other
tm.assert_equal(result, expected)
result = parr >= other
tm.assert_equal(result, expected)
result = parr != other
tm.assert_equal(result, ~expected)
result = parr < other
tm.assert_equal(result, ~expected)
result = parr > other
tm.assert_equal(result, ~expected)
other = other_box(pi[::-1])
expected = np.array([False, False, True, False, False])
expected = tm.box_expected(expected, xbox)
result = parr == other
tm.assert_equal(result, expected)
expected = np.array([True, True, True, False, False])
expected = tm.box_expected(expected, xbox)
result = parr <= other
tm.assert_equal(result, expected)
expected = np.array([False, False, True, True, True])
expected = tm.box_expected(expected, xbox)
result = parr >= other
tm.assert_equal(result, expected)
expected = np.array([True, True, False, True, True])
expected = tm.box_expected(expected, xbox)
result = parr != other
tm.assert_equal(result, expected)
expected = np.array([True, True, False, False, False])
expected = tm.box_expected(expected, xbox)
result = parr < other
tm.assert_equal(result, expected)
expected = np.array([False, False, False, True, True])
expected = tm.box_expected(expected, xbox)
result = parr > other
tm.assert_equal(result, expected)
| TestPeriodArrayLikeComparisons |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 321642,
"end": 322131
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UnresolveReviewThread"""
__schema__ = github_schema
__field_names__ = ("thread_id", "client_mutation_id")
thread_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="threadId")
"""The ID of the thread to unresolve"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UnresolveReviewThreadInput |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 27224,
"end": 28001
} | class ____(
NamedTuple(
"_CancelExecutionResult",
[
("success", bool),
("message", Optional[str]),
("serializable_error_info", Optional[SerializableErrorInfo]),
],
)
):
def __new__(
cls,
success: bool,
message: Optional[str],
serializable_error_info: Optional[SerializableErrorInfo],
):
return super().__new__(
cls,
success=check.bool_param(success, "success"),
message=check.opt_str_param(message, "message"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
| CancelExecutionResult |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 18989,
"end": 19124
} | class ____(BoringModel):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
| AnotherArgModel |
python | getsentry__sentry | tests/sentry/digests/test_utilities.py | {
"start": 5281,
"end": 21047
} | class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user1 = self.create_user()
self.user2 = self.create_user()
self.user3 = self.create_user()
self.user4 = self.create_user()
self.user5 = self.create_user() # this user has no events
self.team1 = self.create_team()
self.team2 = self.create_team()
self.team3 = self.create_team()
self.project = self.create_project(
teams=[self.team1, self.team2, self.team3], fire_project_created=True
)
self.create_member(user=self.user1, organization=self.organization, teams=[self.team1])
self.create_member(user=self.user2, organization=self.organization, teams=[self.team2])
self.create_member(
user=self.user3, organization=self.organization, teams=[self.team1, self.team2]
)
self.create_member(user=self.user4, organization=self.organization, teams=[self.team3])
self.create_member(user=self.user5, organization=self.organization, teams=[self.team3])
self.team1_events = self.create_events_from_filenames(
self.project, ["hello.py", "goodbye.py", "hola.py", "adios.py"]
)
self.team2_events = self.create_events_from_filenames(
self.project, ["old.cbl", "retro.cbl", "cool.cbl", "gem.cbl"]
)
self.user4_events = [
self.store_event(
data={
"stacktrace": {"frames": [{"lineno": 1, "filename": "foo.bar"}]},
"request": {"url": "helloworld.org"},
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["user4group1"],
},
project_id=self.project.id,
),
self.store_event(
data={
"stacktrace": {"frames": [{"lineno": 1, "filename": "bar.foo"}]},
"request": {"url": "helloworld.org"},
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["user4group2"],
},
project_id=self.project.id,
),
]
self.team1_matcher = Matcher("path", "*.py")
self.team2_matcher = Matcher("path", "*.cbl")
self.user4_matcher = Matcher("url", "*.org")
self.project_ownership = ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema(
[
Rule(
self.team1_matcher,
[Owner("team", self.team1.slug), Owner("user", self.user3.email)],
),
Rule(self.team2_matcher, [Owner("team", self.team2.slug)]),
Rule(self.user4_matcher, [Owner("user", self.user4.email)]),
]
),
fallthrough=True,
)
self.rule = self.create_project_rule()
# Represents the "original" rule during dual-write
self.shadow_rule = self.create_project_rule()
self.workflow = self.create_workflow(organization=self.organization)
self.rule_with_workflow_id = self.create_project_rule(
action_data=[{"workflow_id": self.workflow.id}]
)
self.rule_with_legacy_rule_id = self.create_project_rule(
action_data=[{"legacy_rule_id": self.shadow_rule.id}]
)
def create_events_from_filenames(
self, project: Project, filenames: Sequence[str] | None = None
) -> list[Event]:
return [
self.store_event(
data={
"stacktrace": {"frames": [{"filename": label}]},
"fingerprint": [label],
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=project.id,
assert_no_errors=False,
)
for index, label in enumerate(filenames or [])
]
def test_simple(self) -> None:
records = [
event_to_record(event, (self.rule,))
for event in self.team1_events + self.team2_events + self.user4_events
]
digest = build_digest(self.project, sort_records(records))[0]
expected_result = {
self.user2.id: set(self.team2_events),
self.user3.id: set(self.team1_events + self.team2_events),
self.user4.id: set(self.user4_events),
}
assert_get_personalized_digests(self.project, digest, expected_result)
@with_feature("organizations:workflow-engine-ui-links")
def test_simple_with_workflow_id(self) -> None:
records = [
event_to_record(event, (self.rule_with_workflow_id,))
for event in self.team1_events + self.team2_events + self.user4_events
]
digest = build_digest(self.project, sort_records(records))[0]
expected_result = {
self.user2.id: set(self.team2_events),
self.user3.id: set(self.team1_events + self.team2_events),
self.user4.id: set(self.user4_events),
}
assert_get_personalized_digests(self.project, digest, expected_result)
@with_feature("organizations:workflow-engine-trigger-actions")
def test_simple_with_legacy_rule_id(self) -> None:
records = [
event_to_record(event, (self.rule_with_legacy_rule_id,))
for event in self.team1_events + self.team2_events + self.user4_events
]
digest = build_digest(self.project, sort_records(records))[0]
expected_result = {
self.user2.id: set(self.team2_events),
self.user3.id: set(self.team1_events + self.team2_events),
self.user4.id: set(self.user4_events),
}
assert_get_personalized_digests(self.project, digest, expected_result)
assert_rule_ids(digest, [self.shadow_rule.id])
def test_direct_email(self) -> None:
"""When the action type is not Issue Owners, then the target actor gets a digest."""
self.project_ownership.update(fallthrough=False)
records = [event_to_record(event, (self.rule,)) for event in self.team1_events]
digest = build_digest(self.project, sort_records(records))[0]
expected_result = {self.user1.id: set(self.team1_events)}
assert_get_personalized_digests(
self.project, digest, expected_result, ActionTargetType.MEMBER, self.user1.id
)
@with_feature("organizations:workflow-engine-ui-links")
def test_direct_email_with_workflow_id(self) -> None:
"""When the action type is not Issue Owners, then the target actor gets a digest. - Workflow ID"""
self.project_ownership.update(fallthrough=False)
records = [
event_to_record(event, (self.rule_with_workflow_id,)) for event in self.team1_events
]
digest = build_digest(self.project, sort_records(records))[0]
expected_result = {self.user1.id: set(self.team1_events)}
assert_get_personalized_digests(
self.project, digest, expected_result, ActionTargetType.MEMBER, self.user1.id
)
@with_feature("organizations:workflow-engine-trigger-actions")
def test_direct_email_with_legacy_rule_id(self) -> None:
    """When the action type is not Issue Owners, then the target actor gets a digest."""
    # Fallthrough off: routing should go to the explicit member target below.
    self.project_ownership.update(fallthrough=False)
    records = [
        event_to_record(event, (self.rule_with_legacy_rule_id,)) for event in self.team1_events
    ]
    digest = build_digest(self.project, sort_records(records))[0]
    expected_result = {self.user1.id: set(self.team1_events)}
    assert_get_personalized_digests(
        self.project, digest, expected_result, ActionTargetType.MEMBER, self.user1.id
    )
    # With the flag on, the digest reports the legacy (shadow) rule id.
    assert_rule_ids(digest, [self.shadow_rule.id])
def test_team_without_members(self) -> None:
    """No digests are produced when the owning team has no members."""
    team = self.create_team()
    project = self.create_project(teams=[team], fire_project_created=True)
    rule = self.create_project_rule(project)
    # Ownership routes *.cpp paths to the (member-less) team; fallthrough on.
    ProjectOwnership.objects.create(
        project_id=project.id,
        schema=dump_schema([Rule(Matcher("path", "*.cpp"), [Owner("team", team.slug)])]),
        fallthrough=True,
    )
    records = [
        event_to_record(event, (rule,))
        for event in self.create_events_from_filenames(
            project, ["hello.py", "goodbye.py", "hola.py", "adios.py"]
        )
    ]
    digest = build_digest(project, sort_records(records))[0]
    # Sanity check: the freshly created team really has no members.
    user_ids = [member.user_id for member in team.member_set]
    assert not user_ids
    participants_by_provider_by_event = get_participants_by_event(digest, project)
    assert not {
        actor for actors in participants_by_provider_by_event.values() for actor in actors
    }  # no users in this team no digests should be processed
@with_feature("organizations:workflow-engine-ui-links")
def test_team_without_members_with_workflow_id(self) -> None:
    """No digests are produced for a member-less team. - Workflow ID variant."""
    team = self.create_team()
    project = self.create_project(teams=[team], fire_project_created=True)
    ProjectOwnership.objects.create(
        project_id=project.id,
        schema=dump_schema([Rule(Matcher("path", "*.cpp"), [Owner("team", team.slug)])]),
        fallthrough=True,
    )
    # NOTE(review): unlike test_team_without_members, this reuses the fixture
    # rule from self.project instead of creating one for the new project —
    # presumably fine for digest building; confirm against the fixtures.
    records = [
        event_to_record(event, (self.rule_with_workflow_id,))
        for event in self.create_events_from_filenames(
            project, ["hello.py", "goodbye.py", "hola.py", "adios.py"]
        )
    ]
    digest = build_digest(project, sort_records(records))[0]
    # Sanity check: the freshly created team really has no members.
    user_ids = [member.user_id for member in team.member_set]
    assert not user_ids
    participants_by_provider_by_event = get_participants_by_event(digest, project)
    assert not {
        actor for actors in participants_by_provider_by_event.values() for actor in actors
    }  # no users in this team no digests should be processed
@with_feature("organizations:workflow-engine-trigger-actions")
def test_team_without_members_with_legacy_rule_id(self) -> None:
    """No digests for a member-less team, but the legacy rule id is still reported."""
    team = self.create_team()
    project = self.create_project(teams=[team], fire_project_created=True)
    # Re-home the shadow rule onto the new project so the legacy id resolves there.
    self.shadow_rule.project_id = project.id
    self.shadow_rule.save()
    rule = self.create_project_rule(
        project, action_data=[{"legacy_rule_id": self.shadow_rule.id}]
    )
    ProjectOwnership.objects.create(
        project_id=project.id,
        schema=dump_schema([Rule(Matcher("path", "*.cpp"), [Owner("team", team.slug)])]),
        fallthrough=True,
    )
    records = [
        event_to_record(event, (rule,))
        for event in self.create_events_from_filenames(
            project, ["hello.py", "goodbye.py", "hola.py", "adios.py"]
        )
    ]
    digest = build_digest(project, sort_records(records))[0]
    # Sanity check: the freshly created team really has no members.
    user_ids = [member.user_id for member in team.member_set]
    assert not user_ids
    participants_by_provider_by_event = get_participants_by_event(digest, project)
    assert not {
        actor for actors in participants_by_provider_by_event.values() for actor in actors
    }  # no users in this team no digests should be processed
    # Even with no recipients, the digest carries the legacy rule id.
    assert_rule_ids(digest, [self.shadow_rule.id])
def test_only_everyone(self) -> None:
    """Every user receives the digest when all events fall through to everyone."""
    unowned_events = self.create_events_from_filenames(
        self.project, ["hello.moz", "goodbye.moz", "hola.moz", "adios.moz"]
    )
    record_list = []
    for unowned_event in unowned_events:
        record_list.append(event_to_record(unowned_event, (self.rule,)))
    digest = build_digest(self.project, sort_records(record_list))[0]
    everyone = (self.user1, self.user2, self.user3, self.user4, self.user5)
    expected = {user.id: set(unowned_events) for user in everyone}
    assert_get_personalized_digests(self.project, digest, expected)
def test_everyone_with_owners(self) -> None:
    """Unowned events go to everyone; owned events additionally reach their owners."""
    events = self.create_events_from_filenames(
        self.project, ["hello.moz", "goodbye.moz", "hola.moz", "adios.moz"]
    )
    records = [event_to_record(event, (self.rule,)) for event in events + self.team1_events]
    digest = build_digest(self.project, sort_records(records))[0]
    expected_result = {
        self.user1.id: set(events),
        self.user2.id: set(events),
        # user3 gets the unowned events plus team1's owned events.
        self.user3.id: set(events + self.team1_events),
        self.user4.id: set(events),
        self.user5.id: set(events),
    }
    assert_get_personalized_digests(self.project, digest, expected_result)
@with_feature("organizations:workflow-engine-ui-links")
def test_everyone_with_owners_with_workflow_id(self) -> None:
    """Same routing as test_everyone_with_owners, using the workflow-id rule."""
    events = self.create_events_from_filenames(
        self.project, ["hello.moz", "goodbye.moz", "hola.moz", "adios.moz"]
    )
    records = [
        event_to_record(event, (self.rule_with_workflow_id,))
        for event in events + self.team1_events
    ]
    digest = build_digest(self.project, sort_records(records))[0]
    expected_result = {
        self.user1.id: set(events),
        self.user2.id: set(events),
        # user3 gets the unowned events plus team1's owned events.
        self.user3.id: set(events + self.team1_events),
        self.user4.id: set(events),
        self.user5.id: set(events),
    }
    assert_get_personalized_digests(self.project, digest, expected_result)
@with_feature("organizations:workflow-engine-trigger-actions")
def test_everyone_with_owners_with_legacy_rule_id(self) -> None:
    """Same routing as test_everyone_with_owners, using the legacy-rule-id rule."""
    events = self.create_events_from_filenames(
        self.project, ["hello.moz", "goodbye.moz", "hola.moz", "adios.moz"]
    )
    records = [
        event_to_record(event, (self.rule_with_legacy_rule_id,))
        for event in events + self.team1_events
    ]
    digest = build_digest(self.project, sort_records(records))[0]
    expected_result = {
        self.user1.id: set(events),
        self.user2.id: set(events),
        # user3 gets the unowned events plus team1's owned events.
        self.user3.id: set(events + self.team1_events),
        self.user4.id: set(events),
        self.user5.id: set(events),
    }
    assert_get_personalized_digests(self.project, digest, expected_result)
@with_feature("organizations:workflow-engine-trigger-actions")
def test_simple_with_workflow_id_flag_off_fallback(self) -> None:
    """
    Test that when workflow_ids are present but the feature flag is off,
    it falls back to using the linked AlertRule ID via AlertRuleWorkflow.
    """
    # Link the workflow to the shadow rule so the fallback has something to resolve.
    self.create_alert_rule_workflow(workflow=self.workflow, rule_id=self.shadow_rule.id)
    # ui-links enabled only inside this context; the decorator enables
    # trigger-actions for the whole test.
    with self.feature("organizations:workflow-engine-ui-links"):
        records = [
            event_to_record(event, (self.rule_with_workflow_id,))
            for event in self.team1_events + self.team2_events + self.user4_events
        ]
        digest = build_digest(self.project, sort_records(records))[0]
        # The fallback must resolve to the linked AlertRule (shadow rule) id.
        assert_rule_ids(digest, [self.shadow_rule.id])
        expected_result = {
            self.user2.id: set(self.team2_events),
            self.user3.id: set(self.team1_events + self.team2_events),
            self.user4.id: set(self.user4_events),
        }
        assert_get_personalized_digests(self.project, digest, expected_result)
def test_empty_records(self) -> None:
    """An empty record list yields an empty DigestInfo."""
    empty_digest = build_digest(self.project, [])
    assert empty_digest == DigestInfo({}, {}, {})
| GetPersonalizedDigestsTestCase |
python | modin-project__modin | modin/core/execution/ray/common/deferred_execution.py | {
"start": 1432,
"end": 17381
} | class ____:
"""
Deferred execution task.
This class represents a single node in the execution tree. The input is either
an object reference or another node on which this node depends.
The output is calculated by the specified Callable.
If the input is a DeferredExecution node, it is executed first and the execution
output is used as the input for this one. All the executions are performed in a
single batch (i.e. using a single remote call) and the results are saved in all
the nodes that have multiple subscribers.
Parameters
----------
data : ObjectRefType or DeferredExecution
The execution input.
func : callable or ObjectRefType
A function to be executed.
args : list or tuple
Additional positional arguments to be passed in `func`.
kwargs : dict
Additional keyword arguments to be passed in `func`.
num_returns : int, optional
The number of the return values.
Attributes
----------
data : ObjectRefType or DeferredExecution
The execution input.
func : callable or ObjectRefType
A function to be executed.
args : list or tuple
Additional positional arguments to be passed in `func`.
kwargs : dict
Additional keyword arguments to be passed in `func`.
num_returns : int
The number of the return values.
flat_args : bool
True means that there are no lists or DeferredExecution objects in `args`.
In this case, no arguments processing is performed and `args` is passed
to the remote method as is.
flat_kwargs : bool
The same as `flat_args` but for the `kwargs` values.
"""
def __init__(
    self,
    data: Union[
        ObjectRefType,
        "DeferredExecution",
        List[Union[ObjectRefType, "DeferredExecution"]],
    ],
    func: Union[Callable, ObjectRefType],
    args: Union[List[Any], Tuple[Any]],
    kwargs: Dict[str, Any],
    num_returns: int = 1,
):
    # The input node must know it has a consumer so its result is preserved.
    if isinstance(data, DeferredExecution):
        data.subscribe()
    self.data = data
    self.func = func
    self.args = args
    self.kwargs = kwargs
    self.num_returns = num_returns
    # _flat_args also subscribes to any nested DeferredExecution arguments
    # as a side effect of the scan.
    self.flat_args = self._flat_args(args)
    self.flat_kwargs = self._flat_args(kwargs.values())
    # Number of consumers that may trigger this task's execution.
    self.subscribers = 0
@classmethod
def _flat_args(cls, args: Iterable):
    """
    Check if the arguments list is flat and subscribe to all `DeferredExecution` objects.

    Parameters
    ----------
    args : Iterable

    Returns
    -------
    bool
        True when `args` contains no lists/tuples and no DeferredExecution nodes.
    """
    is_flat = True
    for value in args:
        if isinstance(value, DeferredExecution):
            # A nested computation: subscribe so its result is preserved.
            value.subscribe()
            is_flat = False
        elif isinstance(value, ListOrTuple):
            # Recurse purely for the subscription side effect; a nested
            # list already makes the arguments non-flat.
            cls._flat_args(value)
            is_flat = False
    return is_flat
def exec(
    self,
) -> Tuple[ObjectRefOrListType, Union["MetaList", List], Union[int, List[int]]]:
    """
    Execute this task, if required.

    Returns
    -------
    tuple
        The execution result, MetaList, containing the length, width and
        the worker's ip address (the last value in the list) and the values
        offset in the list. I.e. length = meta_list[offset],
        width = meta_list[offset + 1], ip = meta_list[-1].
    """
    if self.has_result:
        # Already executed, possibly triggered by another subscriber.
        return self.data, self.meta, self.meta_offset

    if (
        not isinstance(self.data, DeferredExecution)
        and self.flat_args
        and self.flat_kwargs
        and self.num_returns == 1
    ):
        # Fast path: a single task with plain arguments is submitted
        # directly, without deconstructing the chain.
        result, length, width, ip = remote_exec_func.options(
            resources=RayTaskCustomResources.get()
        ).remote(self.func, self.data, *self.args, **self.kwargs)
        meta = MetaList([length, width, ip])
        self._set_result(result, meta, 0)
        return result, meta, 0

    # If there are no subscribers, we still need the result here. We don't need to decrement
    # it back. After the execution, the result is saved and the counter has no effect.
    self.subscribers += 2
    consumers, output = self._deconstruct()
    # The last result is the MetaList, so adding +1 here.
    total_returns = sum(c.num_returns for c in consumers) + 1
    results = self._remote_exec_chain(total_returns, *output)
    meta = MetaList(results.pop())
    meta_offset = 0
    results = iter(results)
    for de in consumers:
        if de.num_returns == 1:
            de._set_result(next(results), meta, meta_offset)
            meta_offset += 2
        else:
            # BUGFIX: slice exactly this consumer's results — previously the
            # aggregate `num_returns` (sum over all consumers plus 1 for the
            # meta list) was used here, over-consuming the iterator — and
            # base the meta offsets at the running `meta_offset` rather than
            # always at 0, which clashed with earlier consumers' slots.
            res = list(islice(results, de.num_returns))
            offsets = list(range(meta_offset, meta_offset + 2 * de.num_returns, 2))
            de._set_result(res, meta, offsets)
            meta_offset += 2 * de.num_returns
    return self.data, self.meta, self.meta_offset
@property
def has_result(self) -> bool:
    """
    Return true if this task has already been executed and the result is set.

    Returns
    -------
    bool
    """
    # _set_result() deletes `func` (and the argument attributes), so the
    # absence of `func` marks a completed task.
    return not hasattr(self, "func")
def subscribe(self):
    """
    Increment the `subscribers` counter.

    Subscriber is any instance that could trigger the execution of this task.
    In case of multiple subscribers, the execution could be triggered multiple
    times. To prevent the multiple executions, the execution result is returned
    from the worker and saved in this instance. Subsequent calls to `execute()`
    return the previously saved result.
    """
    self.subscribers += 1
def unsubscribe(self):
    """Decrement the `subscribers` counter."""
    self.subscribers -= 1
    # The counter must never go negative: every unsubscribe must be paired
    # with a prior subscribe.
    assert self.subscribers >= 0
def _deconstruct(self) -> Tuple[List["DeferredExecution"], List[Any]]:
    """
    Convert the specified execution tree to a flat list.

    This is required for the automatic Ray object references
    materialization before passing the list to a Ray worker.

    The format of the list is the following:
    <input object> sequence<<function> <n><args> <n><kwargs> <ref> <nret>>...

    If <n> before <args> is >= 0, then the next n objects are the function arguments.
    If it is -1, it means that the method arguments contain list and/or
    DeferredExecution (chain) objects. In this case the next values are read
    one by one until `_Tag.END` is encountered. If the value is `_Tag.LIST`,
    then the next sequence of values up to `_Tag.END` is converted to list.
    If the value is `_Tag.CHAIN`, then the next sequence of values up to
    `_Tag.END` has exactly the same format, as described here.
    If the value is `_Tag.REF`, then the next value is a reference id, i.e.
    the actual value should be retrieved by this id from the previously
    saved objects. The <input object> could also be `_Tag.REF` or `_Tag.LIST`.

    If <n> before <kwargs> is >=0, then the next 2*n values are the argument
    names and values in the following format - [name1, value1, name2, value2...].
    If it's -1, then the next values are converted to list in the same way as
    <args> and the argument names are the next len(<args>) values.

    <ref> is an integer reference id. If it's not 0, then there is another
    chain referring to the execution result of this method and, thus, it must
    be saved so that other chains could retrieve the object by the id.

    <nret> field contains either the `num_returns` value or 0. If it's 0, the
    execution result is not returned, but is just passed to the next task in the
    chain. If it's 1, the result is returned as is. Otherwise, it's expected that
    the result is iterable and the specified number of values is returned from
    the iterator. The values lengths and widths are added to the meta list.

    Returns
    -------
    tuple of list
        * The first list is the result consumers.
            If a DeferredExecution has multiple subscribers, the execution result
            should be returned and saved in order to avoid duplicate executions.
            These DeferredExecution tasks are added to this list and, after the
            execution, the results are passed to the ``_set_result()`` method of
            each task.
        * The second is a flat list of arguments that could be passed to the remote executor.
    """
    stack = []
    result_consumers = []
    output = []
    # Using stack and generators to avoid the ``RecursionError``s.
    stack.append(self._deconstruct_chain(self, output, stack, result_consumers))
    # Depth-first trampoline: each popped item is a generator; a value it
    # yields is a sub-generator, which is pushed on top of it and therefore
    # drained first, emulating recursion without growing the call stack.
    while stack:
        try:
            gen = stack.pop()
            next_gen = next(gen)
            stack.append(gen)
            stack.append(next_gen)
        except StopIteration:
            # The generator is exhausted — simply drop it.
            pass
    return result_consumers, output
@classmethod
def _deconstruct_chain(
    cls,
    de: "DeferredExecution",
    output: List,
    stack: List,
    result_consumers: List["DeferredExecution"],
):
    """
    Deconstruct the specified DeferredExecution chain.

    Parameters
    ----------
    de : DeferredExecution
        The chain to be deconstructed.
    output : list
        Put the arguments to this list.
    stack : list
        Used to eliminate recursive calls, that may lead to the RecursionError.
    result_consumers : list of DeferredExecution
        The result consumers.

    Yields
    ------
    Generator
        The ``_deconstruct_list()`` generator.
    """
    out_append = output.append
    out_extend = output.extend
    # Phase 1: walk up the `data` chain to its root, pushing the visited
    # nodes onto `stack` so phase 2 can emit them innermost-input first.
    while True:
        de.unsubscribe()
        if (out_pos := getattr(de, "out_pos", None)) and not de.has_result:
            # This node was already emitted elsewhere: reference it by id.
            # The id is the node's own index in `output` (see phase 2).
            out_append(_Tag.REF)
            out_append(out_pos)
            output[out_pos] = out_pos
            if de.subscribers == 0:
                # We may have subscribed to the same node multiple times.
                # It could happen, for example, if it's passed to the args
                # multiple times, or it's one of the parent nodes and also
                # passed to the args. In this case, there are no multiple
                # subscribers, and we don't need to return the result.
                output[out_pos + 1] = 0
                result_consumers.remove(de)
            break
        elif not isinstance(data := de.data, DeferredExecution):
            # Reached the chain root: a concrete input object (or a list of them).
            if isinstance(data, ListOrTuple):
                yield cls._deconstruct_list(
                    data, output, stack, result_consumers, out_append
                )
            else:
                out_append(data)
            if not de.has_result:
                stack.append(de)
            break
        else:
            stack.append(de)
            de = data
    # Phase 2: pop the collected nodes and emit
    # <function> <n><args> <n><kwargs> <ref> <nret> for each one.
    while stack and isinstance(stack[-1], DeferredExecution):
        de: DeferredExecution = stack.pop()
        args = de.args
        kwargs = de.kwargs
        out_append(de.func)
        if de.flat_args:
            out_append(len(args))
            out_extend(args)
        else:
            # -1 signals the reader to parse args up to _Tag.END.
            out_append(-1)
            yield cls._deconstruct_list(
                args, output, stack, result_consumers, out_append
            )
        if de.flat_kwargs:
            out_append(len(kwargs))
            for item in kwargs.items():
                out_extend(item)
        else:
            out_append(-1)
            yield cls._deconstruct_list(
                kwargs.values(), output, stack, result_consumers, out_append
            )
            # Iterating a dict yields its keys: the argument names follow
            # the values, as described in the _deconstruct() docstring.
            out_extend(kwargs)
        out_append(0)  # Placeholder for ref id
        if de.subscribers > 0:
            # Ref id. This is the index in the output list.
            de.out_pos = len(output) - 1
            result_consumers.append(de)
            out_append(de.num_returns)  # Return result for this node
        else:
            out_append(0)  # Do not return result for this node
@classmethod
def _deconstruct_list(
    cls,
    lst: Iterable,
    output: List,
    stack: List,
    result_consumers: List["DeferredExecution"],
    out_append: Callable,
):
    """
    Deconstruct the specified list.

    Parameters
    ----------
    lst : list
    output : list
    stack : list
    result_consumers : list
    out_append : Callable
        The reference to the ``list.append()`` method.

    Yields
    ------
    Generator
        Either ``_deconstruct_list()`` or ``_deconstruct_chain()`` generator.
    """
    for obj in lst:
        if isinstance(obj, DeferredExecution):
            if out_pos := getattr(obj, "out_pos", None):
                # The node was already emitted: either inline its ready
                # result or reference it by its output index.
                obj.unsubscribe()
                if obj.has_result:
                    out_append(obj.data)
                else:
                    out_append(_Tag.REF)
                    out_append(out_pos)
                    output[out_pos] = out_pos
                    if obj.subscribers == 0:
                        # Duplicate subscription (see _deconstruct_chain):
                        # no real second consumer, don't return the result.
                        output[out_pos + 1] = 0
                        result_consumers.remove(obj)
            else:
                # A not-yet-emitted chain: emit it inline between CHAIN/END tags.
                out_append(_Tag.CHAIN)
                yield cls._deconstruct_chain(obj, output, stack, result_consumers)
                out_append(_Tag.END)
        elif isinstance(obj, ListOrTuple):
            # Nested list: the recursive generator appends its own _Tag.END.
            out_append(_Tag.LIST)
            yield cls._deconstruct_list(
                obj, output, stack, result_consumers, out_append
            )
        else:
            out_append(obj)
    out_append(_Tag.END)
@staticmethod
def _remote_exec_chain(num_returns: int, *args: Tuple) -> List[Any]:
    """
    Execute the deconstructed chain in a worker process.

    Parameters
    ----------
    num_returns : int
        The number of return values.
    *args : tuple
        A deconstructed chain to be executed.

    Returns
    -------
    list
        The execution results. The last element of this list is the ``MetaList``.
    """
    task_resources = RayTaskCustomResources.get()
    # Prefer _remote_exec_single_chain(): it takes fewer arguments and does
    # not require num_returns to be specified in the task options.
    if num_returns == 2:
        return _remote_exec_single_chain.options(resources=task_resources).remote(*args)
    return _remote_exec_multi_chain.options(
        num_returns=num_returns, resources=task_resources
    ).remote(num_returns, *args)
def _set_result(
    self,
    result: ObjectRefOrListType,
    meta: "MetaList",
    meta_offset: Union[int, List[int]],
):
    """
    Set the execution result.

    Parameters
    ----------
    result : ObjectRefOrListType
    meta : MetaList
    meta_offset : int or list of int
    """
    # Dropping `func` flips `has_result` to True; the argument-related
    # attributes are no longer needed once the result is known.
    del self.func, self.args, self.kwargs, self.flat_args, self.flat_kwargs
    self.data = result
    self.meta = meta
    self.meta_offset = meta_offset
def __reduce__(self):
    """Not serializable."""
    # Pickling a pending node would detach it from its subscribers and
    # results; fail loudly instead.
    raise NotImplementedError("DeferredExecution is not serializable!")
| DeferredExecution |
python | scikit-learn__scikit-learn | sklearn/decomposition/_pca.py | {
"start": 3737,
"end": 34219
} | class ____(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
With sparse inputs, the ARPACK implementation of the truncated SVD can be
used (i.e. through :func:`scipy.sparse.linalg.svds`). Alternatively, one
may consider :class:`TruncatedSVD` where the data are not centered.
Notice that this class only supports sparse inputs for some solvers such as
"arpack" and "covariance_eigh". See :class:`TruncatedSVD` for an
alternative with sparse data.
For a usage example, see
:ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float or 'mle', default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, default=False
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : {'auto', 'full', 'covariance_eigh', 'arpack', 'randomized'},\
default='auto'
"auto" :
The solver is selected by a default 'auto' policy is based on `X.shape` and
`n_components`: if the input data has fewer than 1000 features and
more than 10 times as many samples, then the "covariance_eigh"
solver is used. Otherwise, if the input data is larger than 500x500
and the number of components to extract is lower than 80% of the
smallest dimension of the data, then the more efficient
"randomized" method is selected. Otherwise the exact "full" SVD is
computed and optionally truncated afterwards.
"full" :
Run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
"covariance_eigh" :
Precompute the covariance matrix (on centered data), run a
classical eigenvalue decomposition on the covariance matrix
typically using LAPACK and select the components by postprocessing.
This solver is very efficient for n_samples >> n_features and small
n_features. It is, however, not tractable otherwise for large
n_features (large memory footprint required to materialize the
covariance matrix). Also note that compared to the "full" solver,
this solver effectively doubles the condition number and is
therefore less numerical stable (e.g. on input data with a large
range of singular values).
"arpack" :
Run SVD truncated to `n_components` calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
`0 < n_components < min(X.shape)`
"randomized" :
Run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
.. versionchanged:: 1.5
Added the 'covariance_eigh' solver.
tol : float, default=0.0
Tolerance for singular values computed by svd_solver == 'arpack'.
Must be of range [0.0, infinity).
.. versionadded:: 0.18.0
iterated_power : int or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'.
Must be of range [0, infinity).
.. versionadded:: 0.18.0
n_oversamples : int, default=10
This parameter is only relevant when `svd_solver="randomized"`.
It corresponds to the additional number of random vectors to sample the
range of `X` so as to ensure proper conditioning. See
:func:`~sklearn.utils.extmath.randomized_svd` for more details.
.. versionadded:: 1.1
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Power iteration normalizer for randomized SVD solver.
Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
for more details.
.. versionadded:: 1.1
random_state : int, RandomState instance or None, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. Equivalently, the right singular
vectors of the centered input data, parallel to its eigenvectors.
The components are sorted by decreasing ``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
The amount of variance explained by each of the selected components.
The variance estimation uses `n_samples - 1` degrees of freedom.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method from:
`Minka, T. P.. "Automatic choice of dimensionality for PCA".
In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
Implements the probabilistic PCA model from:
`Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
<http://www.miketipping.com/papers/met-mppca.pdf>`_
via the score and score_samples methods.
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
:doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.
<10.1137/090771806>`
and also
:doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68.
<10.1016/j.acha.2010.02.003>`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924 0.0075]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924 0.00755]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244]
>>> print(pca.singular_values_)
[6.30061]
"""
# Declarative constraint table consumed by the `_fit_context` decorator on
# `fit`/`fit_transform` to validate constructor arguments before fitting.
_parameter_constraints: dict = {
    "n_components": [
        Interval(Integral, 0, None, closed="left"),
        Interval(RealNotInt, 0, 1, closed="neither"),
        StrOptions({"mle"}),
        None,
    ],
    "copy": ["boolean"],
    "whiten": ["boolean"],
    "svd_solver": [
        StrOptions({"auto", "full", "covariance_eigh", "arpack", "randomized"})
    ],
    "tol": [Interval(Real, 0, None, closed="left")],
    "iterated_power": [
        StrOptions({"auto"}),
        Interval(Integral, 0, None, closed="left"),
    ],
    "n_oversamples": [Interval(Integral, 1, None, closed="left")],
    "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
    "random_state": ["random_state"],
}
def __init__(
    self,
    n_components=None,
    *,
    copy=True,
    whiten=False,
    svd_solver="auto",
    tol=0.0,
    iterated_power="auto",
    n_oversamples=10,
    power_iteration_normalizer="auto",
    random_state=None,
):
    # Store hyper-parameters unmodified (scikit-learn convention: no work in
    # __init__ so get_params/set_params round-trip; validation happens in
    # fit via `_fit_context` and `_parameter_constraints`).
    self.n_components = n_components
    self.copy = copy
    self.whiten = whiten
    self.svd_solver = svd_solver
    self.tol = tol
    self.iterated_power = iterated_power
    self.n_oversamples = n_oversamples
    self.power_iteration_normalizer = power_iteration_normalizer
    self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
    """Fit the model with X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Ignored.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    # _fit also returns the SVD factors (used by fit_transform); only the
    # fitted state set on `self` matters here.
    self._fit(X)
    return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Ignored.

    Returns
    -------
    X_new : ndarray of shape (n_samples, n_components)
        Transformed values.

    Notes
    -----
    This method returns a Fortran-ordered array. To convert it to a
    C-ordered array, use 'np.ascontiguousarray'.
    """
    U, S, _, X, x_is_centered, xp = self._fit(X)
    if U is not None:
        # Keep only the first n_components_ left singular vectors.
        U = U[:, : self.n_components_]

        if self.whiten:
            # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
            # (the code uses n_samples - 1, matching the ddof of the
            # variance estimate rather than the formula above)
            U *= sqrt(X.shape[0] - 1)
        else:
            # X_new = X * V = U * S * Vt * V = U * S
            U *= S[: self.n_components_]

        return U
    else:  # solver="covariance_eigh" does not compute U at fit time.
        return self._transform(X, xp, x_is_centered=x_is_centered)
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
xp, is_array_api_compliant = get_namespace(X)
# Raise an error for sparse input and unsupported svd_solver
if issparse(X) and self.svd_solver not in ["auto", "arpack", "covariance_eigh"]:
raise TypeError(
'PCA only support sparse inputs with the "arpack" and'
f' "covariance_eigh" solvers, while "{self.svd_solver}" was passed. See'
" TruncatedSVD for a possible alternative."
)
if self.svd_solver == "arpack" and is_array_api_compliant:
raise ValueError(
"PCA with svd_solver='arpack' is not supported for Array API inputs."
)
# Validate the data, without ever forcing a copy as any solver that
# supports sparse input data and the `covariance_eigh` solver are
# written in a way to avoid the need for any inplace modification of
# the input data contrary to the other solvers.
# The copy will happen
# later, only if needed, once the solver negotiation below is done.
X = validate_data(
self,
X,
dtype=[xp.float64, xp.float32],
force_writeable=True,
accept_sparse=("csr", "csc"),
ensure_2d=True,
copy=False,
)
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == "auto" and issparse(X):
self._fit_svd_solver = "arpack"
if self.n_components is None:
if self._fit_svd_solver != "arpack":
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
if self._fit_svd_solver == "auto":
# Tall and skinny problems are best handled by precomputing the
# covariance matrix.
if X.shape[1] <= 1_000 and X.shape[0] >= 10 * X.shape[1]:
self._fit_svd_solver = "covariance_eigh"
# Small problem or n_components == 'mle', just call full PCA
elif max(X.shape) <= 500 or n_components == "mle":
self._fit_svd_solver = "full"
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = "randomized"
# This is also the case of n_components in (0, 1)
else:
self._fit_svd_solver = "full"
# Call different fits for either full or truncated SVD
if self._fit_svd_solver in ("full", "covariance_eigh"):
return self._fit_full(X, n_components, xp, is_array_api_compliant)
elif self._fit_svd_solver in ["arpack", "randomized"]:
return self._fit_truncated(X, n_components, xp)
def _fit_full(self, X, n_components, xp, is_array_api_compliant):
"""Fit the model by computing full SVD on X."""
n_samples, n_features = X.shape
if n_components == "mle":
if n_samples < n_features:
raise ValueError(
"n_components='mle' is only supported if n_samples >= n_features"
)
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError(
f"n_components={n_components} must be between 0 and "
f"min(n_samples, n_features)={min(n_samples, n_features)} with "
f"svd_solver={self._fit_svd_solver!r}"
)
self.mean_ = xp.mean(X, axis=0)
# When X is a scipy sparse matrix, self.mean_ is a numpy matrix, so we need
# to transform it to a 1D array. Note that this is not the case when X
# is a scipy sparse array.
# TODO: remove the following two lines when scikit-learn only depends
# on scipy versions that no longer support scipy.sparse matrices.
self.mean_ = xp.reshape(xp.asarray(self.mean_), (-1,))
if self._fit_svd_solver == "full":
X_centered = xp.asarray(X, copy=True) if self.copy else X
X_centered -= self.mean_
x_is_centered = not self.copy
if not is_array_api_compliant:
# Use scipy.linalg with NumPy/SciPy inputs for the sake of not
# introducing unanticipated behavior changes. In the long run we
# could instead decide to always use xp.linalg.svd for all inputs,
# but that would make this code rely on numpy's SVD instead of
# scipy's. It's not 100% clear whether they use the same LAPACK
# solver by default though (assuming both are built against the
# same BLAS).
U, S, Vt = linalg.svd(X_centered, full_matrices=False)
else:
U, S, Vt = xp.linalg.svd(X_centered, full_matrices=False)
explained_variance_ = (S**2) / (n_samples - 1)
else:
assert self._fit_svd_solver == "covariance_eigh"
# In the following, we center the covariance matrix C afterwards
# (without centering the data X first) to avoid an unnecessary copy
# of X. Note that the mean_ attribute is still needed to center
# test data in the transform method.
#
# Note: at the time of writing, `xp.cov` does not exist in the
# Array API standard:
# https://github.com/data-apis/array-api/issues/43
#
# Besides, using `numpy.cov`, as of numpy 1.26.0, would not be
# memory efficient for our use case when `n_samples >> n_features`:
# `numpy.cov` centers a copy of the data before computing the
# matrix product instead of subtracting a small `(n_features,
# n_features)` square matrix from the gram matrix X.T @ X, as we do
# below.
x_is_centered = False
C = X.T @ X
C -= (
n_samples
* xp.reshape(self.mean_, (-1, 1))
* xp.reshape(self.mean_, (1, -1))
)
C /= n_samples - 1
eigenvals, eigenvecs = xp.linalg.eigh(C)
# When X is a scipy sparse matrix, the following two datastructures
# are returned as instances of the soft-deprecated numpy.matrix
# class. Note that this problem does not occur when X is a scipy
# sparse array (or another other kind of supported array).
# TODO: remove the following two lines when scikit-learn only
# depends on scipy versions that no longer support scipy.sparse
# matrices.
eigenvals = xp.reshape(xp.asarray(eigenvals), (-1,))
eigenvecs = xp.asarray(eigenvecs)
eigenvals = xp.flip(eigenvals, axis=0)
eigenvecs = xp.flip(eigenvecs, axis=1)
# The covariance matrix C is positive semi-definite by
# construction. However, the eigenvalues returned by xp.linalg.eigh
# can be slightly negative due to numerical errors. This would be
# an issue for the subsequent sqrt, hence the manual clipping.
eigenvals[eigenvals < 0.0] = 0.0
explained_variance_ = eigenvals
# Re-construct SVD of centered X indirectly and make it consistent
# with the other solvers.
S = xp.sqrt(eigenvals * (n_samples - 1))
Vt = eigenvecs.T
U = None
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U, Vt, u_based_decision=False)
components_ = Vt
# Get variance explained by singular values
total_var = xp.sum(explained_variance_)
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
# Postprocess the number of components required
if n_components == "mle":
n_components = _infer_dimension(explained_variance_, n_samples)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
# side='right' ensures that number of features selected
# their variance is always greater than n_components float
# passed. More discussion in issue: #15669
ratio_cumsum = xp.cumulative_sum(explained_variance_ratio_)
n_components = (
xp.searchsorted(
ratio_cumsum,
xp.asarray(n_components, device=device(ratio_cumsum)),
side="right",
)
+ 1
)
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = xp.mean(explained_variance_[n_components:])
else:
self.noise_variance_ = 0.0
self.n_samples_ = n_samples
self.n_components_ = n_components
# Assign a copy of the result of the truncation of the components in
# order to:
# - release the memory used by the discarded components,
# - ensure that the kept components are allocated contiguously in
# memory to make the transform method faster by leveraging cache
# locality.
self.components_ = xp.asarray(components_[:n_components, :], copy=True)
# We do the same for the other arrays for the sake of consistency.
self.explained_variance_ = xp.asarray(
explained_variance_[:n_components], copy=True
)
self.explained_variance_ratio_ = xp.asarray(
explained_variance_ratio_[:n_components], copy=True
)
self.singular_values_ = xp.asarray(singular_values_[:n_components], copy=True)
return U, S, Vt, X, x_is_centered, xp
def _fit_truncated(self, X, n_components, xp):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X.
"""
n_samples, n_features = X.shape
svd_solver = self._fit_svd_solver
if isinstance(n_components, str):
raise ValueError(
"n_components=%r cannot be a string with svd_solver='%s'"
% (n_components, svd_solver)
)
elif not 1 <= n_components <= min(n_samples, n_features):
raise ValueError(
"n_components=%r must be between 1 and "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features), svd_solver)
)
elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
raise ValueError(
"n_components=%r must be strictly less than "
"min(n_samples, n_features)=%r with "
"svd_solver='%s'"
% (n_components, min(n_samples, n_features), svd_solver)
)
random_state = check_random_state(self.random_state)
# Center data
total_var = None
if issparse(X):
self.mean_, var = mean_variance_axis(X, axis=0)
total_var = var.sum() * n_samples / (n_samples - 1) # ddof=1
X_centered = _implicit_column_offset(X, self.mean_)
x_is_centered = False
else:
self.mean_ = xp.mean(X, axis=0)
X_centered = xp.asarray(X, copy=True) if self.copy else X
X_centered -= self.mean_
x_is_centered = not self.copy
if svd_solver == "arpack":
v0 = _init_arpack_v0(min(X.shape), random_state)
U, S, Vt = svds(X_centered, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, Vt = svd_flip(U[:, ::-1], Vt[::-1], u_based_decision=False)
elif svd_solver == "randomized":
# sign flipping is done inside
U, S, Vt = _randomized_svd(
X_centered,
n_components=n_components,
n_oversamples=self.n_oversamples,
n_iter=self.iterated_power,
power_iteration_normalizer=self.power_iteration_normalizer,
flip_sign=False,
random_state=random_state,
)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
self.n_samples_ = n_samples
self.components_ = Vt
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S**2) / (n_samples - 1)
# Workaround in-place variance calculation since at the time numpy
# did not have a way to calculate variance in-place.
#
# TODO: update this code to either:
# * Use the array-api variance calculation, unless memory usage suffers
# * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
# See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
if total_var is None:
N = X.shape[0] - 1
X_centered **= 2
total_var = xp.sum(X_centered) / N
self.explained_variance_ratio_ = self.explained_variance_ / total_var
self.singular_values_ = xp.asarray(S, copy=True) # Store the singular values.
if self.n_components_ < min(n_features, n_samples):
self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
self.noise_variance_ /= min(n_features, n_samples) - n_components
else:
self.noise_variance_ = 0.0
return U, S, Vt, X, x_is_centered, xp
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, dtype=[xp.float64, xp.float32], reset=False)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Ignored.
Returns
-------
ll : float
Average log-likelihood of the samples under the current model.
"""
xp, _ = get_namespace(X)
return float(xp.mean(self.score_samples(X)))
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
solver = getattr(self, "_fit_svd_solver", self.svd_solver)
tags.array_api_support = solver not in ["arpack", "randomized"] or (
solver == "randomized" and self.power_iteration_normalizer == "QR"
)
tags.input_tags.sparse = self.svd_solver in (
"auto",
"arpack",
"covariance_eigh",
)
return tags
| PCA |
python | pytorch__pytorch | test/test_maskedtensor.py | {
"start": 35072,
"end": 40104
} | class ____(TestCase):
def _convert_mt_args(self, args, mask, layout):
return [
masked_tensor(
arg.sparse_mask(mask) if layout != torch.strided else arg, mask
)
if torch.is_tensor(arg)
else arg
for arg in args
]
def _test_unary_binary_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
sample_args, sample_kwargs = sample.args, sample.kwargs
mask = (
_create_random_mask(input.shape, device)
if "mask" not in sample_kwargs
else sample_kwargs.pop("mask")
)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
# Binary operations currently only support same size masks
if is_binary(op):
if input.shape != sample_args[0].shape:
continue
# Binary operations also don't support kwargs right now
else:
sample_kwargs = {}
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(sample.input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
# If the operation is binary, check that lhs = masked, rhs = regular tensor also works
if is_binary(op) and layout == torch.strided:
mt_result2 = op(mt, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result2, t_result)
def _test_reduction_equality(self, device, dtype, op, layout=torch.strided):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
input = sample.input
# Reduction operations don't support more advanced args/kwargs right now
sample_args, sample_kwargs = (), {}
if input.dim() == 0 or input.numel() == 0:
continue
mask = _create_random_mask(input.shape, device)
if torch.count_nonzero(mask) == 0:
continue
tensor_input = _combine_input_and_mask(op.op, input, mask)
if layout == torch.sparse_coo:
mask = mask.to_sparse_coo().coalesce()
input = input.sparse_mask(mask)
elif layout == torch.sparse_csr:
if input.ndim != 2 or mask.ndim != 2:
continue
mask = mask.to_sparse_csr()
input = input.sparse_mask(mask)
mt = masked_tensor(input, mask)
mt_args = self._convert_mt_args(sample_args, mask, layout)
mt_result = op(mt, *mt_args, **sample_kwargs)
t_result = op(tensor_input, *sample_args, **sample_kwargs)
_compare_mt_t(mt_result, t_result)
@ops(mt_unary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_unary_core(self, device, dtype, op, layout):
# Skip tests that don't have len(kwargs) == 0
skip_variants = {
"decimals_0",
"decimals_3",
"decimals_neg_3",
}
if op.name == "round" and op.variant_test_name in skip_variants:
return
self._test_unary_binary_equality(device, dtype, op)
@ops(mt_binary_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_binary_core(self, device, dtype, op, layout):
self._test_unary_binary_equality(device, dtype, op, layout)
@ops(mt_reduction_ufuncs, allowed_dtypes=MASKEDTENSOR_FLOAT_TYPES) # type: ignore[arg-type]
@parametrize("layout", [torch.strided, torch.sparse_coo, torch.sparse_csr])
def test_reduction_all(self, device, dtype, op, layout):
# argmin and argmax are not currently supported for torch.sparse_csr
if op.name in {"argmin", "argmax"} and layout == torch.sparse_csr:
return
self._test_reduction_equality(device, dtype, op, layout)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
instantiate_device_type_tests(TestBasics, globals(), only_for=only_for)
instantiate_parametrized_tests(TestUnary)
instantiate_parametrized_tests(TestBinary)
instantiate_parametrized_tests(TestReductions)
if __name__ == '__main__':
run_tests()
| TestOperators |
python | kamyu104__LeetCode-Solutions | Python/employee-free-time.py | {
"start": 222,
"end": 903
} | class ____(object):
def employeeFreeTime(self, schedule):
"""
:type schedule: List[List[Interval]]
:rtype: List[Interval]
"""
result = []
min_heap = [(emp[0].start, eid, 0) for eid, emp in enumerate(schedule)]
heapq.heapify(min_heap)
last_end = -1
while min_heap:
t, eid, i = heapq.heappop(min_heap)
if 0 <= last_end < t:
result.append(Interval(last_end, t))
last_end = max(last_end, schedule[eid][i].end)
if i+1 < len(schedule[eid]):
heapq.heappush(min_heap, (schedule[eid][i+1].start, eid, i+1))
return result
| Solution |
python | PyCQA__pylint | tests/functional/r/redefined/redefined_slots.py | {
"start": 1059,
"end": 1165
} | class ____:
"""No crash when the type of the slot is not a Const or a str"""
__slots__ = [str]
| MyClass |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/instigation_logger.py | {
"start": 3185,
"end": 7396
} | class ____(logging.Logger):
"""Logger exposed on the evaluation context of sensor/schedule evaluation functions. This is tied
to the Python logging system by setting up a custom logging handler that writes JSON payloads
representing the log events to the dagster-managed captured log manager. These log events are
persisted, using the given log_key, which is stored on the sensor/schedule tick. These logs can
then be retrieved using the log_key through captured log manager's API.
The instigation logger also adds a console logger to emit the logs in a structured way from the
evaluation process.
"""
def __init__(
self,
log_key: Optional[Sequence[str]] = None,
instance: Optional["DagsterInstance"] = None,
repository_name: Optional[str] = None,
instigator_name: Optional[str] = None,
level: int = logging.NOTSET,
logger_name: str = "dagster",
console_logger: Optional[logging.Logger] = None,
):
super().__init__(name=logger_name, level=coerce_valid_log_level(level))
self._log_key = log_key
self._instance = instance
self._repository_name = repository_name
self._instigator_name = instigator_name
self._exit_stack = ExitStack()
self._capture_handler = None
if console_logger is None:
console_logger = create_console_logger("dagster", logging.INFO)
self.addHandler(DispatchingLogHandler([console_logger]))
def __enter__(self):
if (
self._log_key
and self._instance
and isinstance(self._instance.compute_log_manager, ComputeLogManager)
):
try:
write_stream = self._exit_stack.enter_context(
self._instance.compute_log_manager.open_log_stream(
self._log_key, ComputeIOType.STDERR
)
)
except Exception:
sys.stderr.write(
f"Exception initializing logger write stream: {serializable_error_info_from_exc_info(sys.exc_info())}\n"
)
write_stream = None
if write_stream:
self._capture_handler = CapturedLogHandler(write_stream)
self.addHandler(self._capture_handler)
return self
def __exit__(self, _exception_type, _exception_value, _traceback):
try:
self._exit_stack.close()
except Exception:
sys.stderr.write(
f"Exception closing logger write stream: {serializable_error_info_from_exc_info(sys.exc_info())}\n"
)
def _annotate_record(self, record: logging.LogRecord) -> logging.LogRecord:
if self._repository_name and self._instigator_name:
message = record.getMessage()
setattr(
record,
LOG_RECORD_METADATA_ATTR,
{
"repository_name": self._repository_name,
"name": self._instigator_name,
"orig_message": message,
},
)
record.msg = " - ".join([self._repository_name, self._instigator_name, message])
record.args = tuple()
return record
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func, extra, sinfo): # pyright: ignore[reportIncompatibleMethodOverride]
record = super().makeRecord(name, level, fn, lno, msg, args, exc_info, func, extra, sinfo)
return self._annotate_record(record)
def has_captured_logs(self):
return self._capture_handler and self._capture_handler.has_logged
def get_instigation_log_records(
instance: "DagsterInstance", log_key: Sequence[str]
) -> Sequence[Mapping[str, Any]]:
log_data = instance.compute_log_manager.get_log_data(log_key)
raw_logs = log_data.stderr.decode("utf-8") if log_data.stderr else ""
records = []
for line in raw_logs.split("\n"):
if not line:
continue
try:
records.append(seven.json.loads(line))
except json.JSONDecodeError:
continue
return records
| InstigationLogger |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/tests/simple_test_envs.py | {
"start": 793,
"end": 10184
} | class ____(BaseEnv):
"""
Very simple "game" - the agent has a position on [-1, 1], gets a reward of 1 if it reaches 1, and a reward of -1 if
it reaches -1. The position is incremented by the action amount (clamped to [-step_size, step_size]).
"""
def __init__(
self,
brain_names,
step_size=STEP_SIZE,
num_visual=0,
num_vector=1,
num_var_len=0,
vis_obs_size=VIS_OBS_SIZE,
vec_obs_size=OBS_SIZE,
var_len_obs_size=VAR_LEN_SIZE,
action_sizes=(1, 0),
goal_indices=None,
):
super().__init__()
self.num_visual = num_visual
self.num_vector = num_vector
self.num_var_len = num_var_len
self.vis_obs_size = vis_obs_size
self.vec_obs_size = vec_obs_size
self.var_len_obs_size = var_len_obs_size
self.goal_indices = goal_indices
continuous_action_size, discrete_action_size = action_sizes
discrete_tuple = tuple(2 for _ in range(discrete_action_size))
action_spec = ActionSpec(continuous_action_size, discrete_tuple)
self.total_action_size = (
continuous_action_size + discrete_action_size
) # to set the goals/positions
self.action_spec = action_spec
self.behavior_spec = BehaviorSpec(self._make_observation_specs(), action_spec)
self.action_spec = action_spec
self.names = brain_names
self.positions: Dict[str, List[float]] = {}
self.step_count: Dict[str, float] = {}
# Concatenate the arguments for a consistent random seed
seed = (
brain_names,
step_size,
num_visual,
num_vector,
num_var_len,
vis_obs_size,
vec_obs_size,
var_len_obs_size,
action_sizes,
)
self.random = random.Random(str(seed))
self.goal: Dict[str, int] = {}
self.action = {}
self.rewards: Dict[str, float] = {}
self.final_rewards: Dict[str, List[float]] = {}
self.step_result: Dict[str, Tuple[DecisionSteps, TerminalSteps]] = {}
self.agent_id: Dict[str, int] = {}
self.step_size = step_size # defines the difficulty of the test
# Allow to be used as a UnityEnvironment during tests
self.academy_capabilities = None
for name in self.names:
self.agent_id[name] = 0
self.goal[name] = self.random.choice([-1, 1])
self.rewards[name] = 0
self.final_rewards[name] = []
self._reset_agent(name)
self.action[name] = None
self.step_result[name] = None
def _make_observation_specs(self) -> List[ObservationSpec]:
obs_shape: List[Any] = []
for _ in range(self.num_vector):
obs_shape.append((self.vec_obs_size,))
for _ in range(self.num_visual):
obs_shape.append(self.vis_obs_size)
for _ in range(self.num_var_len):
obs_shape.append(self.var_len_obs_size)
obs_spec = create_observation_specs_with_shapes(obs_shape)
if self.goal_indices is not None:
for i in range(len(obs_spec)):
if i in self.goal_indices:
obs_spec[i] = ObservationSpec(
shape=obs_spec[i].shape,
dimension_property=obs_spec[i].dimension_property,
observation_type=ObservationType.GOAL_SIGNAL,
name=obs_spec[i].name,
)
return obs_spec
def _make_obs(self, value: float) -> List[np.ndarray]:
obs = []
for _ in range(self.num_vector):
obs.append(np.ones((1, self.vec_obs_size), dtype=np.float32) * value)
for _ in range(self.num_visual):
obs.append(np.ones((1,) + self.vis_obs_size, dtype=np.float32) * value)
for _ in range(self.num_var_len):
obs.append(np.ones((1,) + self.var_len_obs_size, dtype=np.float32) * value)
return obs
@property
def behavior_specs(self):
behavior_dict = {}
for n in self.names:
behavior_dict[n] = self.behavior_spec
return BehaviorMapping(behavior_dict)
def set_action_for_agent(self, behavior_name, agent_id, action):
pass
def set_actions(self, behavior_name, action):
self.action[behavior_name] = action
def get_steps(self, behavior_name):
return self.step_result[behavior_name]
def _take_action(self, name: str) -> bool:
deltas = []
_act = self.action[name]
if self.action_spec.continuous_size > 0:
for _cont in _act.continuous[0]:
deltas.append(_cont)
if self.action_spec.discrete_size > 0:
for _disc in _act.discrete[0]:
deltas.append(1 if _disc else -1)
for i, _delta in enumerate(deltas):
_delta = clamp(_delta, -self.step_size, self.step_size)
self.positions[name][i] += _delta
self.positions[name][i] = clamp(self.positions[name][i], -1, 1)
self.step_count[name] += 1
# Both must be in 1.0 to be done
done = all(pos >= 1.0 or pos <= -1.0 for pos in self.positions[name])
return done
def _generate_mask(self):
action_mask = None
if self.action_spec.discrete_size > 0:
# LL-Python API will return an empty dim if there is only 1 agent.
ndmask = np.array(
2 * self.action_spec.discrete_size * [False], dtype=np.bool
)
ndmask = np.expand_dims(ndmask, axis=0)
action_mask = [ndmask]
return action_mask
def _compute_reward(self, name: str, done: bool) -> float:
if done:
reward = 0.0
for _pos in self.positions[name]:
reward += (SUCCESS_REWARD * _pos * self.goal[name]) / len(
self.positions[name]
)
else:
reward = -TIME_PENALTY
return reward
def _reset_agent(self, name):
self.goal[name] = self.random.choice([-1, 1])
self.positions[name] = [0.0 for _ in range(self.total_action_size)]
self.step_count[name] = 0
self.rewards[name] = 0
self.agent_id[name] = self.agent_id[name] + 1
def _make_batched_step(
self, name: str, done: bool, reward: float, group_reward: float
) -> Tuple[DecisionSteps, TerminalSteps]:
m_vector_obs = self._make_obs(self.goal[name])
m_reward = np.array([reward], dtype=np.float32)
m_agent_id = np.array([self.agent_id[name]], dtype=np.int32)
m_group_id = np.array([0], dtype=np.int32)
m_group_reward = np.array([group_reward], dtype=np.float32)
action_mask = self._generate_mask()
decision_step = DecisionSteps(
m_vector_obs, m_reward, m_agent_id, action_mask, m_group_id, m_group_reward
)
terminal_step = TerminalSteps.empty(self.behavior_spec)
if done:
self.final_rewards[name].append(self.rewards[name])
self._reset_agent(name)
new_vector_obs = self._make_obs(self.goal[name])
(
new_reward,
new_done,
new_agent_id,
new_action_mask,
new_group_id,
new_group_reward,
) = self._construct_reset_step(name)
decision_step = DecisionSteps(
new_vector_obs,
new_reward,
new_agent_id,
new_action_mask,
new_group_id,
new_group_reward,
)
terminal_step = TerminalSteps(
m_vector_obs,
m_reward,
np.array([False], dtype=np.bool),
m_agent_id,
m_group_id,
m_group_reward,
)
return (decision_step, terminal_step)
def _construct_reset_step(
self, name: str
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
new_reward = np.array([0.0], dtype=np.float32)
new_done = np.array([False], dtype=np.bool)
new_agent_id = np.array([self.agent_id[name]], dtype=np.int32)
new_action_mask = self._generate_mask()
new_group_id = np.array([0], dtype=np.int32)
new_group_reward = np.array([0.0], dtype=np.float32)
return (
new_reward,
new_done,
new_agent_id,
new_action_mask,
new_group_id,
new_group_reward,
)
def step(self) -> None:
assert all(action is not None for action in self.action.values())
for name in self.names:
done = self._take_action(name)
reward = self._compute_reward(name, done)
self.rewards[name] += reward
self.step_result[name] = self._make_batched_step(name, done, reward, 0.0)
def reset(self) -> None: # type: ignore
for name in self.names:
self._reset_agent(name)
self.step_result[name] = self._make_batched_step(name, False, 0.0, 0.0)
@property
def reset_parameters(self) -> Dict[str, str]:
return {}
def close(self):
pass
| SimpleEnvironment |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0106_add_addons_config.py | {
"start": 216,
"end": 4507
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0105_remove_project_urlconf"),
]
operations = [
migrations.CreateModel(
name="AddonsConfig",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
(
"enabled",
models.BooleanField(
default=True,
help_text="Enable/Disable all the addons on this project",
),
),
("analytics_enabled", models.BooleanField(default=True)),
("doc_diff_enabled", models.BooleanField(default=True)),
("doc_diff_show_additions", models.BooleanField(default=True)),
("doc_diff_show_deletions", models.BooleanField(default=True)),
(
"doc_diff_root_selector",
models.CharField(blank=True, max_length=128, null=True),
),
("external_version_warning_enabled", models.BooleanField(default=True)),
("ethicalads_enabled", models.BooleanField(default=True)),
("flyout_enabled", models.BooleanField(default=True)),
("hotkeys_enabled", models.BooleanField(default=True)),
("search_enabled", models.BooleanField(default=True)),
(
"search_default_filter",
models.CharField(blank=True, max_length=128, null=True),
),
(
"stable_latest_version_warning_enabled",
models.BooleanField(default=True),
),
(
"project",
models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="addons",
to="projects.project",
),
),
],
options={
"get_latest_by": "modified",
"abstract": False,
},
),
migrations.CreateModel(
name="AddonSearchFilter",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("name", models.CharField(max_length=128)),
("syntaxt", models.CharField(max_length=128)),
(
"addons",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="projects.addonsconfig",
),
),
],
options={
"get_latest_by": "modified",
"abstract": False,
},
),
]
| Migration |
python | plotly__plotly.py | plotly/graph_objs/volume/_contour.py | {
"start": 233,
"end": 3493
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume"
_path_str = "volume.contour"
_valid_props = {"color", "show", "width"}
@property
def color(self):
"""
Sets the color of the contour lines.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def show(self):
"""
Sets whether or not dynamic contours are shown on hover
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def width(self):
"""
Sets the width of the contour lines.
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, 16]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the contour lines.
show
Sets whether or not dynamic contours are shown on hover
width
Sets the width of the contour lines.
"""
def __init__(self, arg=None, color=None, show=None, width=None, **kwargs):
"""
Construct a new Contour object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.Contour`
color
Sets the color of the contour lines.
show
Sets whether or not dynamic contours are shown on hover
width
Sets the width of the contour lines.
Returns
-------
Contour
"""
super().__init__("contour")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.Contour
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Contour`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("show", arg, show)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Contour |
python | pytest-dev__pytest | src/_pytest/runner.py | {
"start": 9357,
"end": 14823
} | class ____(Generic[TResult]):
"""Result/Exception info of a function invocation."""
_result: TResult | None
#: The captured exception of the call, if it raised.
excinfo: ExceptionInfo[BaseException] | None
#: The system time when the call started, in seconds since the epoch.
start: float
#: The system time when the call ended, in seconds since the epoch.
stop: float
#: The call duration, in seconds.
duration: float
#: The context of invocation: "collect", "setup", "call" or "teardown".
when: Literal["collect", "setup", "call", "teardown"]
def __init__(
self,
result: TResult | None,
excinfo: ExceptionInfo[BaseException] | None,
start: float,
stop: float,
duration: float,
when: Literal["collect", "setup", "call", "teardown"],
*,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
self._result = result
self.excinfo = excinfo
self.start = start
self.stop = stop
self.duration = duration
self.when = when
@property
def result(self) -> TResult:
"""The return value of the call, if it didn't raise.
Can only be accessed if excinfo is None.
"""
if self.excinfo is not None:
raise AttributeError(f"{self!r} has no valid result")
# The cast is safe because an exception wasn't raised, hence
# _result has the expected function return type (which may be
# None, that's why a cast and not an assert).
return cast(TResult, self._result)
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
instant = timing.Instant()
try:
result: TResult | None = func()
except BaseException:
excinfo = ExceptionInfo.from_current()
if reraise is not None and isinstance(excinfo.value, reraise):
raise
result = None
duration = instant.elapsed()
return cls(
start=duration.start.time,
stop=duration.stop.time,
duration=duration.seconds,
when=when,
result=result,
excinfo=excinfo,
_ispytest=True,
)
def __repr__(self) -> str:
if self.excinfo is None:
return f"<CallInfo when={self.when!r} result: {self._result!r}>"
return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
return TestReport.from_item_and_call(item, call)
def pytest_make_collect_report(collector: Collector) -> CollectReport:
def collect() -> list[Item | Collector]:
# Before collecting, if this is a Directory, load the conftests.
# If a conftest import fails to load, it is considered a collection
# error of the Directory collector. This is why it's done inside of the
# CallInfo wrapper.
#
# Note: initial conftests are loaded early, not here.
if isinstance(collector, Directory):
collector.config.pluginmanager._loadconftestmodules(
collector.path,
collector.config.getoption("importmode"),
rootpath=collector.config.rootpath,
consider_namespace_packages=collector.config.getini(
"consider_namespace_packages"
),
)
return list(collector.collect())
call = CallInfo.from_call(
collect, "collect", reraise=(KeyboardInterrupt, SystemExit)
)
longrepr: None | tuple[str, int, str] | str | TerminalRepr = None
if not call.excinfo:
outcome: Literal["passed", "skipped", "failed"] = "passed"
else:
skip_exceptions = [Skipped]
unittest = sys.modules.get("unittest")
if unittest is not None:
skip_exceptions.append(unittest.SkipTest)
if isinstance(call.excinfo.value, tuple(skip_exceptions)):
outcome = "skipped"
r_ = collector._repr_failure_py(call.excinfo, "line")
assert isinstance(r_, ExceptionChainRepr), repr(r_)
r = r_.reprcrash
assert r
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
assert isinstance(errorinfo, str)
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
result = call.result if not call.excinfo else None
rep = CollectReport(collector.nodeid, outcome, longrepr, result)
rep.call = call # type: ignore # see collect_one_node
return rep
| CallInfo |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 2727,
"end": 3807
} | class ____:
"""Stores the necessary information to build the request to retrieve the next page.
Either `url` or `params` must be set.
"""
url: URL | NotGiven
params: Query | NotGiven
json: Body | NotGiven
@overload
def __init__(
self,
*,
url: URL,
) -> None: ...
@overload
def __init__(
self,
*,
params: Query,
) -> None: ...
@overload
def __init__(
self,
*,
json: Body,
) -> None: ...
def __init__(
self,
*,
url: URL | NotGiven = not_given,
json: Body | NotGiven = not_given,
params: Query | NotGiven = not_given,
) -> None:
self.url = url
self.json = json
self.params = params
@override
def __repr__(self) -> str:
if self.url:
return f"{self.__class__.__name__}(url={self.url})"
if self.json:
return f"{self.__class__.__name__}(json={self.json})"
return f"{self.__class__.__name__}(params={self.params})"
| PageInfo |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp_tests/bigquery_tests/test_io_manager.py | {
"start": 6091,
"end": 8231
} | class ____(DbTypeHandler[int]):
def handle_output(self, context: OutputContext, table_slice: TableSlice, obj: int, connection):
connection.query(
f"SELECT * FROM {table_slice.database}.{table_slice.schema}.{table_slice.table}"
).result()
def load_input(self, context: InputContext, table_slice: TableSlice, connection) -> int:
return 7
@property
def supported_types(self):
return [int]
@pytest.mark.skipif(not IS_BUILDKITE, reason="Requires access to the BUILDKITE bigquery DB")
@pytest.mark.integration
def test_authenticate_via_config():
schema = "BIGQUERY_IO_MANAGER_SCHEMA"
with temporary_bigquery_table(
schema_name=schema,
column_str="FOO string",
) as table_name:
asset_info = dict()
@asset(name=table_name, key_prefix=schema)
def test_asset() -> int:
asset_info["gcp_creds_file"] = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
assert os.getenv("GOOGLE_APPLICATION_CREDENTIALS") is not None
return 1
old_gcp_creds_file = os.environ.pop("GOOGLE_APPLICATION_CREDENTIALS", None)
assert old_gcp_creds_file is not None
passed = False
try:
with open(old_gcp_creds_file) as f:
gcp_creds = f.read()
bq_io_manager = build_bigquery_io_manager([FakeHandler()]).configured(
{
**SHARED_BUILDKITE_BQ_CONFIG,
"gcp_credentials": base64.b64encode(str.encode(gcp_creds)).decode(),
}
)
resource_defs = {"io_manager": bq_io_manager}
assert os.getenv("GOOGLE_APPLICATION_CREDENTIALS") is None
result = materialize(
[test_asset],
resources=resource_defs,
)
passed = result.success
assert os.getenv("GOOGLE_APPLICATION_CREDENTIALS") is None
assert not os.path.exists(asset_info["gcp_creds_file"])
finally:
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = old_gcp_creds_file
assert passed
| FakeHandler |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 10856,
"end": 12167
} | class ____(LocalizableStreamlitException):
"""Exception raised mixing floats and ints in st.number_input."""
def __init__(
self,
value: int | float | Literal["min"] | None,
min_value: int | float | None,
max_value: int | float | None,
step: int | float | None,
) -> None:
value_type = None
min_value_type = None
max_value_type = None
step_type = None
error_message = "All numerical arguments must be of the same type."
if value:
value_type = type(value).__name__
error_message += "\n`value` has {value_type} type."
if min_value:
min_value_type = type(min_value).__name__
error_message += "\n`min_value` has {min_value_type} type."
if max_value:
max_value_type = type(max_value).__name__
error_message += "\n`max_value` has {max_value_type} type."
if step:
step_type = type(step).__name__
error_message += "\n`step` has {step_type} type."
super().__init__(
error_message,
value_type=value_type,
min_value_type=min_value_type,
max_value_type=max_value_type,
step_type=step_type,
)
| StreamlitMixedNumericTypesError |
python | Textualize__textual | tests/listview/test_inherit_listview.py | {
"start": 135,
"end": 655
} | class ____(ListView):
"""Test child class of a ListView."""
BINDINGS = [Binding(key="s", action="set", description="Set")]
def __init__(self, items: int = 0) -> None:
super().__init__()
self._items = items
self.action_fired = False
def compose(self) -> ComposeResult:
"""Compose the child widgets."""
for n in range(self._items):
yield ListItem(Label(f"This is item {n}"))
def action_set(self) -> None:
self.action_fired = True
| MyListView |
python | celery__celery | t/unit/backends/test_gcs.py | {
"start": 253,
"end": 22282
} | class ____:
def setup_method(self):
self.app.conf.gcs_bucket = 'bucket'
self.app.conf.gcs_project = 'project'
@pytest.fixture(params=['', 'test_folder/'])
def base_path(self, request):
return request.param
@pytest.fixture(params=[86400, None])
def gcs_ttl(self, request):
return request.param
def test_missing_storage_module(self):
with patch('celery.backends.gcs.storage', None):
with pytest.raises(
ImproperlyConfigured, match='You must install'
):
GCSBackend(app=self.app)
def test_missing_firestore_module(self):
with patch('celery.backends.gcs.firestore', None):
with pytest.raises(
ImproperlyConfigured, match='You must install'
):
GCSBackend(app=self.app)
def test_missing_bucket(self):
self.app.conf.gcs_bucket = None
with pytest.raises(ImproperlyConfigured, match='Missing bucket name'):
GCSBackend(app=self.app)
def test_missing_project(self):
self.app.conf.gcs_project = None
with pytest.raises(ImproperlyConfigured, match='Missing project'):
GCSBackend(app=self.app)
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_firestore_project(self, mock_firestore_ttl):
mock_firestore_ttl.return_value = True
b = GCSBackend(app=self.app)
assert b.firestore_project == 'project'
self.app.conf.firestore_project = 'project2'
b = GCSBackend(app=self.app)
assert b.firestore_project == 'project2'
def test_invalid_ttl(self):
self.app.conf.gcs_bucket = 'bucket'
self.app.conf.gcs_project = 'project'
self.app.conf.gcs_ttl = -1
with pytest.raises(ImproperlyConfigured, match='Invalid ttl'):
GCSBackend(app=self.app)
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_firestore_ttl_policy_disabled(self, mock_firestore_ttl):
self.app.conf.gcs_bucket = 'bucket'
self.app.conf.gcs_project = 'project'
self.app.conf.gcs_ttl = 0
mock_firestore_ttl.return_value = False
with pytest.raises(ImproperlyConfigured, match='Missing TTL policy'):
GCSBackend(app=self.app)
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_parse_url(self, mock_firestore_ttl, base_path):
self.app.conf.gcs_bucket = None
self.app.conf.gcs_project = None
mock_firestore_ttl.return_value = True
backend = GCSBackend(
app=self.app,
url=f'gcs://bucket/{base_path}?gcs_project=project',
)
assert backend.bucket_name == 'bucket'
assert backend.base_path == base_path.strip('/')
@patch.object(GCSBackend, '_is_bucket_lifecycle_rule_exists')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_bucket_ttl_missing_lifecycle_rule(
self, mock_firestore_ttl, mock_lifecycle
):
self.app.conf.gcs_ttl = 86400
mock_lifecycle.return_value = False
mock_firestore_ttl.return_value = True
with pytest.raises(
ImproperlyConfigured, match='Missing lifecycle rule'
):
GCSBackend(app=self.app)
mock_lifecycle.assert_called_once()
@patch.object(GCSBackend, '_get_blob')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_get_key(self, mock_ttl, mock_get_blob, base_path):
self.app.conf.gcs_base_path = base_path
mock_ttl.return_value = True
mock_blob = Mock()
mock_get_blob.return_value = mock_blob
backend = GCSBackend(app=self.app)
backend.get(b"testkey1")
mock_get_blob.assert_called_once_with('testkey1')
mock_blob.download_as_bytes.assert_called_once()
@patch.object(GCSBackend, 'bucket')
@patch.object(GCSBackend, '_get_blob')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_set_key(
self,
mock_firestore_ttl,
mock_get_blob,
mock_bucket_prop,
base_path,
gcs_ttl,
):
self.app.conf.gcs_base_path = base_path
self.app.conf.gcs_ttl = gcs_ttl
mock_firestore_ttl.return_value = True
mock_blob = Mock()
mock_get_blob.return_value = mock_blob
mock_bucket_prop.lifecycle_rules = [{'action': {'type': 'Delete'}}]
backend = GCSBackend(app=self.app)
backend.set('testkey', 'testvalue')
mock_get_blob.assert_called_once_with('testkey')
mock_blob.upload_from_string.assert_called_once_with(
'testvalue', retry=backend._retry_policy
)
if gcs_ttl:
assert mock_blob.custom_time >= datetime.utcnow()
@patch.object(GCSBackend, '_get_blob')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_get_missing_key(self, mock_firestore_ttl, mock_get_blob):
self.app.conf.gcs_bucket = 'bucket'
self.app.conf.gcs_project = 'project'
mock_firestore_ttl.return_value = True
mock_blob = Mock()
mock_get_blob.return_value = mock_blob
mock_blob.download_as_bytes.side_effect = NotFound('not found')
gcs_backend = GCSBackend(app=self.app)
result = gcs_backend.get('some-key')
assert result is None
@patch.object(GCSBackend, '_get_blob')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_delete_existing_key(
self, mock_firestore_ttl, mock_get_blob, base_path
):
self.app.conf.gcs_base_path = base_path
mock_firestore_ttl.return_value = True
mock_blob = Mock()
mock_get_blob.return_value = mock_blob
mock_blob.exists.return_value = True
backend = GCSBackend(app=self.app)
backend.delete(b"testkey2")
mock_get_blob.assert_called_once_with('testkey2')
mock_blob.exists.assert_called_once()
mock_blob.delete.assert_called_once()
@patch.object(GCSBackend, '_get_blob')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_delete_missing_key(
self, mock_firestore_ttl, mock_get_blob, base_path
):
self.app.conf.gcs_base_path = base_path
mock_firestore_ttl.return_value = True
mock_blob = Mock()
mock_get_blob.return_value = mock_blob
mock_blob.exists.return_value = False
backend = GCSBackend(app=self.app)
backend.delete(b"testkey2")
mock_get_blob.assert_called_once_with('testkey2')
mock_blob.exists.assert_called_once()
mock_blob.delete.assert_not_called()
@patch.object(GCSBackend, 'get')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_mget(self, mock_firestore_ttl, mock_get, base_path):
self.app.conf.gcs_base_path = base_path
mock_firestore_ttl.return_value = True
backend = GCSBackend(app=self.app)
mock_get.side_effect = ['value1', 'value2']
result = backend.mget([b'key1', b'key2'])
mock_get.assert_has_calls(
[call(b'key1'), call(b'key2')], any_order=True
)
assert sorted(result) == sorted(['value1', 'value2'])
@patch.object(GCSBackend, 'client')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_bucket(self, mock_firestore_ttl, mock_client):
mock_bucket = MagicMock()
mock_client.bucket.return_value = mock_bucket
mock_firestore_ttl.return_value = True
backend = GCSBackend(app=self.app)
result = backend.bucket
mock_client.bucket.assert_called_once_with(backend.bucket_name)
assert result == mock_bucket
@patch.object(GCSBackend, 'bucket')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_get_blob(self, mock_firestore_ttl, mock_bucket):
key = 'test_key'
mock_blob = MagicMock()
mock_bucket.blob.return_value = mock_blob
mock_firestore_ttl.return_value = True
backend = GCSBackend(app=self.app)
result = backend._get_blob(key)
key_bucket_path = (
f'{backend.base_path}/{key}' if backend.base_path else key
)
mock_bucket.blob.assert_called_once_with(key_bucket_path)
assert result == mock_blob
@patch('celery.backends.gcs.Client')
@patch('celery.backends.gcs.getpid')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_new_client_after_fork(
self, mock_firestore_ttl, mock_pid, mock_client
):
mock_firestore_ttl.return_value = True
mock_pid.return_value = 123
backend = GCSBackend(app=self.app)
client1 = backend.client
assert client1 == backend.client
mock_pid.assert_called()
mock_client.assert_called()
mock_pid.return_value = 456
mock_client.return_value = Mock()
assert client1 != backend.client
mock_client.assert_called_with(project='project')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch('celery.backends.gcs.firestore.Client')
@patch('celery.backends.gcs.getpid')
def test_new_firestore_client_after_fork(
self, mock_pid, mock_firestore_client, mock_firestore_ttl
):
mock_firestore_instance = MagicMock()
mock_firestore_client.return_value = mock_firestore_instance
backend = GCSBackend(app=self.app)
mock_pid.return_value = 123
client1 = backend.firestore_client
client2 = backend.firestore_client
mock_firestore_client.assert_called_once_with(
project=backend.firestore_project
)
assert client1 == mock_firestore_instance
assert client2 == mock_firestore_instance
assert backend._pid == 123
mock_pid.return_value = 456
_ = backend.firestore_client
assert backend._pid == 456
@patch('celery.backends.gcs.firestore_admin_v1.FirestoreAdminClient')
@patch('celery.backends.gcs.firestore_admin_v1.GetFieldRequest')
def test_is_firestore_ttl_policy_enabled(
self, mock_get_field_request, mock_firestore_admin_client
):
mock_client_instance = MagicMock()
mock_firestore_admin_client.return_value = mock_client_instance
mock_field = MagicMock()
mock_field.ttl_config.state = 2 # State.ENABLED
mock_client_instance.get_field.return_value = mock_field
backend = GCSBackend(app=self.app)
result = backend._is_firestore_ttl_policy_enabled()
assert result
mock_field.ttl_config.state = 3 # State.NEEDS_REPAIR
mock_client_instance.get_field.return_value = mock_field
result = backend._is_firestore_ttl_policy_enabled()
assert not result
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch.object(GCSBackend, '_expire_chord_key')
@patch.object(GCSBackend, 'get_key_for_chord')
@patch('celery.backends.gcs.KeyValueStoreBackend._apply_chord_incr')
def test_apply_chord_incr(
self,
mock_super_apply_chord_incr,
mock_get_key_for_chord,
mock_expire_chord_key,
mock_firestore_ttl,
):
mock_firestore_ttl.return_value = True
mock_get_key_for_chord.return_value = b'group_key'
header_result_args = [MagicMock()]
body = MagicMock()
backend = GCSBackend(app=self.app)
backend._apply_chord_incr(header_result_args, body)
mock_get_key_for_chord.assert_called_once_with(header_result_args[0])
mock_expire_chord_key.assert_called_once_with('group_key', 86400)
mock_super_apply_chord_incr.assert_called_once_with(
header_result_args, body
)
@patch.object(GCSBackend, '_firestore_document')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_incr(self, mock_firestore_ttl, mock_firestore_document):
self.app.conf.gcs_bucket = 'bucket'
self.app.conf.gcs_project = 'project'
mock_firestore_ttl.return_value = True
gcs_backend = GCSBackend(app=self.app)
gcs_backend.incr(b'some-key')
assert mock_firestore_document.call_count == 1
@patch('celery.backends.gcs.maybe_signature')
@patch.object(GCSBackend, 'incr')
@patch.object(GCSBackend, '_restore_deps')
@patch.object(GCSBackend, '_delete_chord_key')
@patch('celery.backends.gcs.allow_join_result')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_on_chord_part_return(
self,
mock_firestore_ttl,
mock_allow_join_result,
mock_delete_chord_key,
mock_restore_deps,
mock_incr,
mock_maybe_signature,
):
request = MagicMock()
request.group = 'group_id'
request.chord = {'chord_size': 2}
state = MagicMock()
result = MagicMock()
mock_firestore_ttl.return_value = True
mock_incr.return_value = 2
mock_restore_deps.return_value = MagicMock()
mock_restore_deps.return_value.join_native.return_value = [
'result1',
'result2',
]
mock_maybe_signature.return_value = MagicMock()
b = GCSBackend(app=self.app)
b.on_chord_part_return(request, state, result)
group_key = b.chord_keyprefix + b'group_id'
mock_incr.assert_called_once_with(group_key)
mock_restore_deps.assert_called_once_with('group_id', request)
mock_maybe_signature.assert_called_once_with(
request.chord, app=self.app
)
mock_restore_deps.return_value.join_native.assert_called_once_with(
timeout=self.app.conf.result_chord_join_timeout,
propagate=True,
)
mock_maybe_signature.return_value.delay.assert_called_once_with(
['result1', 'result2']
)
mock_delete_chord_key.assert_called_once_with(group_key)
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch('celery.backends.gcs.GroupResult.restore')
@patch('celery.backends.gcs.maybe_signature')
@patch.object(GCSBackend, 'chord_error_from_stack')
def test_restore_deps(
self,
mock_chord_error_from_stack,
mock_maybe_signature,
mock_group_result_restore,
mock_firestore_ttl,
):
gid = 'group_id'
request = MagicMock()
mock_group_result_restore.return_value = MagicMock()
backend = GCSBackend(app=self.app)
deps = backend._restore_deps(gid, request)
mock_group_result_restore.assert_called_once_with(
gid, backend=backend
)
assert deps is not None
mock_chord_error_from_stack.assert_not_called()
mock_group_result_restore.side_effect = Exception('restore error')
deps = backend._restore_deps(gid, request)
mock_maybe_signature.assert_called_with(request.chord, app=self.app)
mock_chord_error_from_stack.assert_called_once()
assert deps is None
mock_group_result_restore.side_effect = None
mock_group_result_restore.return_value = None
deps = backend._restore_deps(gid, request)
mock_chord_error_from_stack.assert_called()
assert deps is None
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch.object(GCSBackend, '_firestore_document')
def test_delete_chord_key(
self, mock_firestore_document, mock_firestore_ttl
):
key = 'test_key'
mock_document = MagicMock()
mock_firestore_document.return_value = mock_document
backend = GCSBackend(app=self.app)
backend._delete_chord_key(key)
mock_firestore_document.assert_called_once_with(key)
mock_document.delete.assert_called_once()
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch.object(GCSBackend, '_firestore_document')
def test_expire_chord_key(
self, mock_firestore_document, mock_firestore_ttl
):
key = 'test_key'
expires = 86400
mock_document = MagicMock()
mock_firestore_document.return_value = mock_document
expected_expiry = datetime.utcnow() + timedelta(seconds=expires)
backend = GCSBackend(app=self.app)
backend._expire_chord_key(key, expires)
mock_firestore_document.assert_called_once_with(key)
mock_document.set.assert_called_once()
args, kwargs = mock_document.set.call_args
assert backend._field_expires in args[0]
assert args[0][backend._field_expires] >= expected_expiry
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
@patch.object(GCSBackend, 'firestore_client')
def test_firestore_document(
self, mock_firestore_client, mock_firestore_ttl
):
key = b'test_key'
mock_collection = MagicMock()
mock_document = MagicMock()
mock_firestore_client.collection.return_value = mock_collection
mock_collection.document.return_value = mock_document
backend = GCSBackend(app=self.app)
result = backend._firestore_document(key)
mock_firestore_client.collection.assert_called_once_with(
backend._collection_name
)
mock_collection.document.assert_called_once_with('test_key')
assert result == mock_document
@patch('celery.backends.gcs.maybe_signature')
@patch.object(GCSBackend, 'incr')
@patch.object(GCSBackend, '_restore_deps')
@patch.object(GCSBackend, '_delete_chord_key')
@patch.object(GCSBackend, 'chord_error_from_stack')
@patch('celery.backends.gcs.allow_join_result')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_on_chord_part_return_join_exception(
self,
mock_firestore_ttl,
mock_allow_join_result_,
mock_chord_error_from_stack,
mock_delete_chord_key,
mock_restore_deps,
mock_incr,
mock_maybe_signature,
):
"""Test on_chord_part_return when join_native raises exception."""
request = MagicMock()
request.group = 'group_id'
request.chord = {'chord_size': 2}
state = MagicMock()
result = MagicMock()
mock_firestore_ttl.return_value = True
mock_incr.return_value = 2
# Mock dependencies and callback
mock_deps = MagicMock()
mock_restore_deps.return_value = mock_deps
mock_callback = MagicMock()
mock_maybe_signature.return_value = mock_callback
# Make join_native raise an exception
join_exception = ValueError('Join failed')
mock_deps.join_native.side_effect = join_exception
mock_deps._failed_join_report.return_value = iter([]) # No culprit found
backend = GCSBackend(app=self.app)
backend.on_chord_part_return(request, state, result)
# Verify chord_error_from_stack was called with the exception
mock_chord_error_from_stack.assert_called_once()
call_args = mock_chord_error_from_stack.call_args
assert call_args[0][0] == mock_callback # callback argument
chord_error_arg = call_args[0][1] # exc argument
assert 'ValueError' in str(chord_error_arg)
assert chord_error_arg.__cause__ == join_exception
# Verify cleanup still happens
mock_deps.delete.assert_called_once()
mock_delete_chord_key.assert_called_once()
@patch('celery.backends.gcs.maybe_signature')
@patch.object(GCSBackend, 'incr')
@patch.object(GCSBackend, '_restore_deps')
@patch.object(GCSBackend, '_delete_chord_key')
@patch.object(GCSBackend, 'chord_error_from_stack')
@patch('celery.backends.gcs.allow_join_result')
@patch.object(GCSBackend, '_is_firestore_ttl_policy_enabled')
def test_on_chord_part_return_callback_exception(
self,
mock_firestore_ttl,
mock_allow_join_result_,
mock_chord_error_from_stack,
mock_delete_chord_key,
mock_restore_deps,
mock_incr,
mock_maybe_signature,
):
"""Test on_chord_part_return when callback.delay raises exception (line 302)."""
request = MagicMock()
request.group = 'group_id'
request.chord = {'chord_size': 2}
state = MagicMock()
result = MagicMock()
mock_firestore_ttl.return_value = True
mock_incr.return_value = 2
# Mock dependencies and callback
mock_deps = MagicMock()
mock_restore_deps.return_value = mock_deps
mock_deps.join_native.return_value = ['result1', 'result2']
mock_callback = MagicMock()
mock_maybe_signature.return_value = mock_callback
# Make callback.delay raise an exception
callback_exception = RuntimeError('Callback failed')
mock_callback.delay.side_effect = callback_exception
backend = GCSBackend(app=self.app)
backend.on_chord_part_return(request, state, result)
# Verify join was successful first
mock_deps.join_native.assert_called_once_with(
timeout=self.app.conf.result_chord_join_timeout,
propagate=True,
)
# Verify callback.delay was called and failed
mock_callback.delay.assert_called_once_with(['result1', 'result2'])
# Verify chord_error_from_stack was called with ChordError
mock_chord_error_from_stack.assert_called_once()
call_args = mock_chord_error_from_stack.call_args
assert call_args[0][0] == mock_callback # callback argument
chord_error_arg = call_args[0][1] # exc argument
assert 'Callback error:' in str(chord_error_arg)
assert 'RuntimeError' in str(chord_error_arg)
# Verify cleanup still happens
mock_deps.delete.assert_called_once()
mock_delete_chord_key.assert_called_once()
| test_GCSBackend |
python | apache__airflow | providers/atlassian/jira/src/airflow/providers/atlassian/jira/operators/jira.py | {
"start": 1116,
"end": 3788
} | class ____(BaseOperator):
"""
JiraOperator to interact and perform action on Jira issue tracking system.
This operator is designed to use Atlassian Jira SDK. For more information:
https://atlassian-python-api.readthedocs.io/jira.html
:param jira_conn_id: Reference to a pre-defined Jira Connection.
:param jira_method: Method name from Atlassian Jira Python SDK to be called.
:param jira_method_args: Method parameters for the jira_method. (templated)
:param result_processor: Function to further process the response from Jira.
:param get_jira_resource_method: Function or operator to get Jira resource on which the provided
jira_method will be executed.
"""
template_fields: Sequence[str] = ("jira_method_args",)
def __init__(
self,
*,
jira_method: str,
jira_conn_id: str = "jira_default",
jira_method_args: dict | None = None,
result_processor: Callable | None = None,
get_jira_resource_method: Callable | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.jira_conn_id = jira_conn_id
self.method_name = jira_method
self.jira_method_args = jira_method_args or {}
self.result_processor = result_processor
self.get_jira_resource_method = get_jira_resource_method
def execute(self, context: Context) -> Any:
if self.get_jira_resource_method is not None:
# if get_jira_resource_method is provided, jira_method will be executed on
# resource returned by executing the get_jira_resource_method.
# This makes all the provided methods of atlassian-python-api JIRA sdk accessible and usable
# directly at the JiraOperator without additional wrappers.
# ref: https://atlassian-python-api.readthedocs.io/jira.html
if isinstance(self.get_jira_resource_method, JiraOperator):
resource = self.get_jira_resource_method.execute(**context)
else:
resource = self.get_jira_resource_method(**context)
else:
# Default method execution is on the top level jira client resource
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.client
jira_result: Any = getattr(resource, self.method_name)(**self.jira_method_args)
output = jira_result.get("id", None) if isinstance(jira_result, dict) else None
context["task_instance"].xcom_push(key="id", value=output)
if self.result_processor:
return self.result_processor(context, jira_result)
return jira_result
| JiraOperator |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/simple_llm_handler.py | {
"start": 256,
"end": 2369
} | class ____(PythonicallyPrintingBaseHandler):
"""Callback handler for printing llms inputs/outputs."""
def __init__(self, logger: Optional[logging.Logger] = None) -> None:
super().__init__(
event_starts_to_ignore=[], event_ends_to_ignore=[], logger=logger
)
def start_trace(self, trace_id: Optional[str] = None) -> None:
return
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
return
def _print_llm_event(self, payload: dict) -> None:
from llama_index.core.llms import ChatMessage
if EventPayload.PROMPT in payload:
prompt = str(payload.get(EventPayload.PROMPT))
completion = str(payload.get(EventPayload.COMPLETION))
self._print(f"** Prompt: **\n{prompt}")
self._print("*" * 50)
self._print(f"** Completion: **\n{completion}")
self._print("*" * 50)
self._print("\n")
elif EventPayload.MESSAGES in payload:
messages = cast(List[ChatMessage], payload.get(EventPayload.MESSAGES, []))
messages_str = "\n".join([str(x) for x in messages])
response = str(payload.get(EventPayload.RESPONSE))
self._print(f"** Messages: **\n{messages_str}")
self._print("*" * 50)
self._print(f"** Response: **\n{response}")
self._print("*" * 50)
self._print("\n")
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
"""Count the LLM or Embedding tokens as needed."""
if event_type == CBEventType.LLM and payload is not None:
self._print_llm_event(payload)
| SimpleLLMHandler |
python | redis__redis-py | redis/event.py | {
"start": 1494,
"end": 4018
} | class ____(EventDispatcherInterface):
# TODO: Make dispatcher to accept external mappings.
def __init__(
self,
event_listeners: Optional[
Dict[Type[object], List[EventListenerInterface]]
] = None,
):
"""
Dispatcher that dispatches events to listeners associated with given event.
"""
self._event_listeners_mapping: Dict[
Type[object], List[EventListenerInterface]
] = {
AfterConnectionReleasedEvent: [
ReAuthConnectionListener(),
],
AfterPooledConnectionsInstantiationEvent: [
RegisterReAuthForPooledConnections()
],
AfterSingleConnectionInstantiationEvent: [
RegisterReAuthForSingleConnection()
],
AfterPubSubConnectionInstantiationEvent: [RegisterReAuthForPubSub()],
AfterAsyncClusterInstantiationEvent: [RegisterReAuthForAsyncClusterNodes()],
AsyncAfterConnectionReleasedEvent: [
AsyncReAuthConnectionListener(),
],
}
self._lock = threading.Lock()
self._async_lock = None
if event_listeners:
self.register_listeners(event_listeners)
def dispatch(self, event: object):
with self._lock:
listeners = self._event_listeners_mapping.get(type(event), [])
for listener in listeners:
listener.listen(event)
async def dispatch_async(self, event: object):
if self._async_lock is None:
self._async_lock = asyncio.Lock()
async with self._async_lock:
listeners = self._event_listeners_mapping.get(type(event), [])
for listener in listeners:
await listener.listen(event)
def register_listeners(
self,
mappings: Dict[
Type[object],
List[Union[EventListenerInterface, AsyncEventListenerInterface]],
],
):
with self._lock:
for event_type in mappings:
if event_type in self._event_listeners_mapping:
self._event_listeners_mapping[event_type] = list(
set(
self._event_listeners_mapping[event_type]
+ mappings[event_type]
)
)
else:
self._event_listeners_mapping[event_type] = mappings[event_type]
| EventDispatcher |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils.py | {
"start": 24544,
"end": 30723
} | class ____(_Optimizer):
"""Optimization parameters for Adagrad + Momentum with TPU embeddings.
Pass this to `tf.tpu.experimental.embedding.TPUEmbedding` via the `optimizer`
argument to set the global optimizer and its parameters:
```python
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
...
optimizer=tf.tpu.experimental.embedding.AdagradMomentum(0.1))
```
This can also be used in a `tf.tpu.experimental.embedding.TableConfig` as the
optimizer parameter to set a table specific optimizer. This will override the
optimizer and parameters for global embedding optimizer defined above:
```python
table_one = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...,
optimizer=tf.tpu.experimental.embedding.AdagradMomentum(0.2))
table_two = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
feature_config = (
tf.tpu.experimental.embedding.FeatureConfig(
table=table_one),
tf.tpu.experimental.embedding.FeatureConfig(
table=table_two))
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=feature_config,
batch_size=...
optimizer=tf.tpu.experimental.embedding.AdagradMomentum(0.1))
```
In the above example, the first feature will be looked up in a table that has
a learning rate of 0.2 while the second feature will be looked up in a table
that has a learning rate of 0.1.
See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a
complete description of these parameters and their impacts on the optimizer
algorithm.
"""
def __init__(
self,
learning_rate: Union[float, Callable[[], float]] = 0.001,
momentum: float = 0.0,
use_nesterov: bool = False,
exponent: float = 2,
beta2: float = 1,
epsilon: float = 1e-10,
use_gradient_accumulation: bool = True,
clip_weight_min: Optional[float] = None,
clip_weight_max: Optional[float] = None,
weight_decay_factor: Optional[float] = None,
multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None,
slot_variable_creation_fn: Optional[SlotVarCreationFnType] = None,
clipvalue: Optional[ClipValueType] = None,
low_dimensional_packing_status: bool = False,
):
"""Optimization parameters for Adagrad + Momentum.
Args:
learning_rate: The learning rate. It should be a floating point value or a
callable taking no arguments for a dynamic learning rate.
momentum: Moving average parameter for the momentum accumulator.
use_nesterov: Whether to use the Nesterov variant of momentum. See
Sutskever et al., 2013.
exponent: Exponent for the Adagrad accumulator.
beta2: Moving average parameter for the Adagrad accumulator.
epsilon: initial accumulator for Adagrad accumulator.
use_gradient_accumulation: setting this to `False` makes embedding
gradients calculation less accurate but faster.
clip_weight_min: the minimum value to clip by; None means -infinity.
clip_weight_max: the maximum value to clip by; None means +infinity.
weight_decay_factor: amount of weight decay to apply; None means that the
weights are not decayed.
multiply_weight_decay_factor_by_learning_rate: if true,
`weight_decay_factor` is multiplied by the current learning rate.
slot_variable_creation_fn: If you wish do directly control the creation of
the slot variables, set this to a callable taking three parameters: a
table variable, a list of slot names to create for it, and a list of
initializers. This function should return a dict with the slot names as
keys and the created variables as values with types matching the table
variable. When set to None (the default), uses the built-in variable
creation.
clipvalue: Controls clipping of the gradient. Set to either a single
positive scalar value to get clipping or a tuple of scalar values (min,
max) to set a separate maximum or minimum. If one of the two entries is
None, then there will be no clipping that direction.
low_dimensional_packing_status: Status of the low-dimensional embedding
packing optimization controls whether to optimize the packing of
1-dimensional, 2-dimensional, and 4-dimensional embedding tables in
memory.
"""
super().__init__(
learning_rate,
use_gradient_accumulation,
clip_weight_min,
clip_weight_max,
weight_decay_factor,
multiply_weight_decay_factor_by_learning_rate,
clipvalue,
slot_variable_creation_fn,
low_dimensional_packing_status,
)
if epsilon <= 0:
raise ValueError("Adagrad momentum: epsilon must be positive")
if exponent <= 0:
raise ValueError("Adagrad momentum: Precondition exponent must >0")
self.momentum = momentum
self.use_nesterov = use_nesterov
self.exponent = exponent
self.beta2 = beta2
self.epsilon = epsilon
def _slot_names(self) -> List[Text]:
return ["accumulators", "momenta"]
def _slot_initializers(self) -> List[init_ops_v2.Initializer]:
return [
init_ops_v2.Constant(support_partition=True),
init_ops_v2.Constant(support_partition=True),
]
def _set_optimization_parameters(
self, parameters: optimization_parameters_pb2.OptimizationParameters
):
super()._set_optimization_parameters(parameters)
parameters.adagrad_momentum.SetInParent()
parameters.adagrad_momentum.momentum = self.momentum
parameters.adagrad_momentum.use_nesterov = self.use_nesterov
parameters.adagrad_momentum.exponent = self.exponent
parameters.adagrad_momentum.beta2 = self.beta2
parameters.adagrad_momentum.epsilon = self.epsilon
def _load(self) -> Callable[..., ops.Operation]:
return tpu_ops.load_tpu_embedding_adagrad_momentum_parameters
def _retrieve(self) -> Callable[..., core.Tensor]:
return tpu_ops.retrieve_tpu_embedding_adagrad_momentum_parameters
@tf_export("tpu.experimental.embedding.FTRL")
| AdagradMomentum |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 33728,
"end": 34321
} | class ____(Patch):
"""A general polycurve path patch."""
_edge_default = True
def __str__(self):
s = "PathPatch%d((%g, %g) ...)"
return s % (len(self._path.vertices), *tuple(self._path.vertices[0]))
@_docstring.interpd
def __init__(self, path, **kwargs):
"""
*path* is a `.Path` object.
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self._path = path
def get_path(self):
return self._path
def set_path(self, path):
self._path = path
| PathPatch |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-premai/llama_index/llms/premai/base.py | {
"start": 1033,
"end": 9577
} | class ____(LLM):
"""PremAI LLM Provider."""
project_id: int = Field(
description=(
"The project ID in which the experiments or deployments are carried out. can find all your projects here: https://app.premai.io/projects/"
)
)
premai_api_key: Optional[str] = Field(
description="Prem AI API Key. Get it here: https://app.premai.io/api_keys/"
)
model: Optional[str] = Field(
description=(
"Name of the model. This is an optional parameter. The default model is the one deployed from Prem's LaunchPad. An example: https://app.premai.io/projects/<project-id>/launchpad. If model name is other than default model then it will override the calls from the model deployed from launchpad."
),
)
system_prompt: Optional[str] = Field(
description=(
"System prompts helps the model to guide the generation and the way it acts. Default system prompt is the one set on your deployed LaunchPad model under the specified project."
),
)
max_tokens: Optional[int] = Field(
description=("The max number of tokens to output from the LLM. ")
)
temperature: Optional[float] = Field(
description="Model temperature. Value should be >= 0 and <= 1.0"
)
max_retries: Optional[int] = Field(
description="Max number of retries to call the API"
)
repositories: Optional[dict] = Field(
description="Add valid repository ids. This will be overriding existing connected repositories (if any) and will use RAG with the connected repos."
)
additional_kwargs: Optional[dict] = Field(
description="Add any additional kwargs. This may override your existing settings."
)
_client: "Prem" = PrivateAttr()
def __init__(
self,
project_id: int,
premai_api_key: Optional[str] = None,
model: Optional[str] = None,
system_prompt: Optional[str] = None,
max_tokens: Optional[str] = 128,
temperature: Optional[float] = 0.1,
max_retries: Optional[int] = 1,
repositories: Optional[dict] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs,
):
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", premai_api_key, "PREMAI_API_KEY", "")
if not api_key:
raise ValueError(
"You must provide an API key to use premai. "
"You can either pass it in as an argument or set it `PREMAI_API_KEY`. You can get your API key here: https://app.premai.io/api_keys/"
)
additional_kwargs = {**(additional_kwargs or {}), **kwargs}
super().__init__(
project_id=project_id,
temperature=temperature,
max_tokens=max_tokens,
model=model,
api_key=api_key,
callback_manager=callback_manager,
system_prompt=system_prompt,
additional_kwargs=additional_kwargs,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
max_retries=max_retries,
repositories=repositories,
)
self._client = Prem(api_key=api_key)
@classmethod
def class_name(cls) -> str:
return "PremAI_LLM"
@property
def metadata(self) -> LLMMetadata:
# TODO: We need to fetch information from prem-sdk here
return LLMMetadata(
num_output=self.max_tokens,
is_chat_model=True,
temperature=self.temperature,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
return {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"system_prompt": self.system_prompt,
"repositories": self.repositories,
}
def _get_all_kwargs(self, **kwargs) -> Dict[str, Any]:
kwargs_to_ignore = [
"top_p",
"tools",
"frequency_penalty",
"presence_penalty",
"logit_bias",
"stop",
"seed",
]
keys_to_remove = []
for key in kwargs:
if key in kwargs_to_ignore:
print(f"WARNING: Parameter {key} is not supported in kwargs.")
keys_to_remove.append(key)
for key in keys_to_remove:
kwargs.pop(key)
all_kwargs = {**self._model_kwargs, **kwargs}
for key in list(self._model_kwargs.keys()):
if all_kwargs.get(key) is None or all_kwargs.get(key) == "":
all_kwargs.pop(key, None)
return all_kwargs
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**{**self.additional_kwargs, **kwargs})
chat_messages, all_kwargs = prepare_messages_before_chat(
messages=messages, **all_kwargs
)
response = self._client.chat.completions.create(
project_id=self.project_id, messages=chat_messages, **all_kwargs
)
if not response.choices:
raise ChatPremError("ChatResponse must have at least one candidate")
choice = response.choices[0]
role = choice.message.role
if role is None:
raise ChatPremError(f"ChatResponse {choice} must have a role.")
content = choice.message.content or ""
return ChatResponse(
message=ChatMessage(role=role, content=content),
raw={
"role": role,
"content": content,
"document_chunks": [
chunk.to_dict() for chunk in response.document_chunks
],
},
)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
all_kwargs = self._get_all_kwargs(**{**self.additional_kwargs, **kwargs})
chat_messages, all_kwargs = prepare_messages_before_chat(
messages=messages, **all_kwargs
)
response_generator = self._client.chat.completions.create(
project_id=self.project_id,
messages=chat_messages,
stream=True,
**all_kwargs,
)
def gen() -> ChatResponseGen:
content = ""
role = MessageRole.ASSISTANT
for chunk in response_generator:
delta = chunk.choices[0].delta
if delta is None or delta["content"] is None:
continue
chunk_content = delta["content"]
content += chunk_content
yield ChatResponse(
message=ChatMessage(content=content, role=role), delta=chunk_content
)
return gen()
def achat(self):
raise NotImplementedError(
"Current version of premai does not support async calls."
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
kwargs["is_completion"] = True
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
def acomplete(self):
raise NotImplementedError(
"Current version of premai does not support async calls."
)
def astream_complete(self):
raise NotImplementedError(
"Current version of premai does not support async calls."
)
def astream_chat(self):
raise NotImplementedError(
"Current version of premai does not support async calls."
)
| PremAI |
python | tartley__colorama | colorama/tests/initialise_test.py | {
"start": 4050,
"end": 6678
} | class ____(TestCase):
def _reset(self):
_wipe_internal_state_for_tests()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def tearDown(self):
self._reset()
@patch("colorama.ansitowin32.winapi_test", lambda: True)
def testJustFixWindowsConsole(self):
if sys.platform != "win32":
# just_fix_windows_console should be a no-op
just_fix_windows_console()
self.assertIs(sys.stdout, orig_stdout)
self.assertIs(sys.stderr, orig_stderr)
else:
def fake_std():
# Emulate stdout=not a tty, stderr=tty
# to check that we handle both cases correctly
stdout = Mock()
stdout.closed = False
stdout.isatty.return_value = False
stdout.fileno.return_value = 1
sys.stdout = stdout
stderr = Mock()
stderr.closed = False
stderr.isatty.return_value = True
stderr.fileno.return_value = 2
sys.stderr = stderr
for native_ansi in [False, True]:
with patch(
'colorama.ansitowin32.enable_vt_processing',
lambda *_: native_ansi
):
self._reset()
fake_std()
# Regular single-call test
prev_stdout = sys.stdout
prev_stderr = sys.stderr
just_fix_windows_console()
self.assertIs(sys.stdout, prev_stdout)
if native_ansi:
self.assertIs(sys.stderr, prev_stderr)
else:
self.assertIsNot(sys.stderr, prev_stderr)
# second call without resetting is always a no-op
prev_stdout = sys.stdout
prev_stderr = sys.stderr
just_fix_windows_console()
self.assertIs(sys.stdout, prev_stdout)
self.assertIs(sys.stderr, prev_stderr)
self._reset()
fake_std()
# If init() runs first, just_fix_windows_console should be a no-op
init()
prev_stdout = sys.stdout
prev_stderr = sys.stderr
just_fix_windows_console()
self.assertIs(prev_stdout, sys.stdout)
self.assertIs(prev_stderr, sys.stderr)
if __name__ == '__main__':
main()
| JustFixWindowsConsoleTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 672446,
"end": 672900
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of GrantMigratorRole"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "success")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
success = sgqlc.types.Field(Boolean, graphql_name="success")
"""Did the operation succeed?"""
| GrantMigratorRolePayload |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 36695,
"end": 37242
} | class ____(SuccessMessageMixin, PrivateViewMixin, UpdateView):
model = Project
form_class = ProjectAdvertisingForm
success_message = _("Project has been opted out from advertisement support")
template_name = "projects/project_advertising.html"
lookup_url_kwarg = "project_slug"
lookup_field = "slug"
def get_queryset(self):
return self.model.objects.for_admin_user(self.request.user)
def get_success_url(self):
return reverse("projects_advertising", args=[self.object.slug])
| ProjectAdvertisingUpdate |
python | django__django | tests/urlpatterns_reverse/middleware.py | {
"start": 440,
"end": 594
} | class ____(MiddlewareMixin):
def process_response(self, *args, **kwargs):
return HttpResponse(reverse("inner"))
| ReverseInnerInResponseMiddleware |
python | ansible__ansible | lib/ansible/module_utils/facts/network/iscsi.py | {
"start": 911,
"end": 4602
} | class ____(NetworkCollector):
name = 'iscsi'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
"""
Example of contents of /etc/iscsi/initiatorname.iscsi:
## DO NOT EDIT OR REMOVE THIS FILE!
## If you remove this file, the iSCSI daemon will not start.
## If you change the InitiatorName, existing access control lists
## may reject this initiator. The InitiatorName must be unique
## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b
Example of output from the AIX lsattr command:
# lsattr -E -l iscsi0
disc_filename /etc/iscsi/targets Configuration file False
disc_policy file Discovery Policy True
initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True
isns_srvnames auto iSNS Servers IP Addresses True
isns_srvports iSNS Servers Port Numbers True
max_targets 16 Maximum Targets Allowed True
num_cmd_elems 200 Maximum number of commands to queue to driver True
Example of output from the HP-UX iscsiutil command:
#iscsiutil -l
Initiator Name : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-e2db-11e0-a999-b44c0aef5537
Initiator Alias :
Authentication Method : None
CHAP Method : CHAP_UNI
Initiator CHAP Name :
CHAP Secret :
NAS Hostname :
NAS Secret :
Radius Server Hostname :
Header Digest : None, CRC32C (default)
Data Digest : None, CRC32C (default)
SLP Scope list for iSLPD :
"""
iscsi_facts = {}
iscsi_facts['iscsi_iqn'] = ""
if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
if line.startswith('InitiatorName='):
iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
break
elif sys.platform.startswith('aix'):
cmd = module.get_bin_path('lsattr')
if cmd is None:
return iscsi_facts
cmd += " -E -l iscsi0"
rc, out, err = module.run_command(cmd)
if rc == 0 and out:
line = self.findstr(out, 'initiator_name')
iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
elif sys.platform.startswith('hp-ux'):
cmd = module.get_bin_path(
'iscsiutil',
opt_dirs=['/opt/iscsi/bin']
)
if cmd is None:
return iscsi_facts
cmd += " -l"
rc, out, err = module.run_command(cmd)
if out:
line = self.findstr(out, 'Initiator Name')
iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
return iscsi_facts
def findstr(self, text, match):
for line in text.splitlines():
if match in line:
found = line
return found
| IscsiInitiatorNetworkCollector |
python | pytorch__pytorch | test/functorch/test_aot_joint_with_descriptors.py | {
"start": 20181,
"end": 24140
} | class ____(torch.nn.Module):
def forward(
self,
primals,
tangents,
):
primals_1: "f32[2, 3]" # ParamAOTInput(target='linear.weight')
primals_2: "f32[2]" # ParamAOTInput(target='linear.bias')
primals_3: "f32[4, 3]" # PlainAOTInput(idx=0)
primals_4: "f32[]" # PlainAOTInput(idx=1)
tangents_1: "f32[4, 2]" # TangentAOTInput(output=PlainAOTOutput(idx=0))
primals_1, primals_2, primals_3, primals_4, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
transpose: "f32[3, 2]" = torch.ops.prims.transpose.default(primals_1, [1, 0]); primals_1 = None
mm: "f32[4, 2]" = torch.ops.aten.mm.default(primals_3, transpose); transpose = None
mul: "f32[4, 2]" = torch.ops.prims.mul.default(mm, 1.0); mm = None
mul_1: "f32[2]" = torch.ops.prims.mul.default(primals_2, 1.0); primals_2 = None
broadcast_in_dim: "f32[4, 2]" = torch.ops.prims.broadcast_in_dim.default(mul_1, [4, 2], [1]); mul_1 = None
add: "f32[4, 2]" = torch.ops.prims.add.default(mul, broadcast_in_dim); mul = broadcast_in_dim = None
mul_2: "f32[4, 2]" = torch.ops.prims.mul.default(add, primals_4); add = None
mul_3: "f32[4, 2]" = torch.ops.prims.mul.default(tangents_1, primals_4); tangents_1 = primals_4 = None
transpose_1: "f32[2, 4]" = torch.ops.prims.transpose.default(mul_3, [1, 0])
mm_1: "f32[2, 3]" = torch.ops.aten.mm.default(transpose_1, primals_3); transpose_1 = primals_3 = None
transpose_2: "f32[3, 2]" = torch.ops.prims.transpose.default(mm_1, [1, 0]); mm_1 = None
sum_1: "f32[2]" = torch.ops.prims.sum.default(mul_3, [0]); mul_3 = None
broadcast_in_dim_1: "f32[1, 2]" = torch.ops.prims.broadcast_in_dim.default(sum_1, [1, 2], [1]); sum_1 = None
as_strided: "f32[2]" = torch.ops.aten.as_strided.default(broadcast_in_dim_1, [2], [1]); broadcast_in_dim_1 = None
transpose_3: "f32[2, 3]" = torch.ops.prims.transpose.default(transpose_2, [1, 0]); transpose_2 = None
return pytree.tree_unflatten([
mul_2, # PlainAOTOutput(idx=0)
transpose_3, # GradAOTOutput(grad_of=ParamAOTInput(target='linear.weight'))
as_strided, # GradAOTOutput(grad_of=ParamAOTInput(target='linear.bias'))
None, # None
None, # None
], self._out_spec)""",
)
# Compile the result
parallel_model_fn = aot_compile_joint_with_descriptors(
joint_with_descriptors
)
# Test functional correctness
expected_output = model(*inputs, **kwargs)
actual_output = parallel_model_fn(
*dict(model.named_parameters()).values(), *inputs, **kwargs
)
self.assertEqual(expected_output, actual_output)
def test_multiple_outputs_module(self):
"""Test module with multiple outputs"""
class MultiOutputModule(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 2)
self.linear2 = nn.Linear(3, 4)
def forward(self, x):
out1 = self.linear1(x)
out2 = self.linear2(x)
return out1, out2
model = MultiOutputModule()
inputs = (torch.randn(4, 3),)
with ExitStack() as stack:
# Export joint with descriptors
joint_with_descriptors = aot_export_joint_with_descriptors(
stack, model, inputs, decompositions=decomposition_table
)
# Test the exported graph structure
graph_code = joint_with_descriptors.graph_module.print_readable(
print_output=False, expanded_def=True
)
# Expect test on the printed graph
self.assertExpectedInline(
normalize_gm(graph_code),
"""\
| inner_f |
python | kamyu104__LeetCode-Solutions | Python/find-the-n-th-value-after-k-seconds.py | {
"start": 808,
"end": 1149
} | class ____(object):
def valueAfterKSeconds(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
MOD = 10**9+7
prefix = [1]*n
for _ in range(k):
for i in xrange(1, n):
prefix[i] = (prefix[i]+prefix[i-1])%MOD
return prefix[-1]
| Solution2 |
python | faif__python-patterns | tests/creational/test_builder.py | {
"start": 105,
"end": 430
} | class ____(unittest.TestCase):
def test_house(self):
house = House()
self.assertEqual(house.size, "Big")
self.assertEqual(house.floor, "One")
def test_flat(self):
flat = Flat()
self.assertEqual(flat.size, "Small")
self.assertEqual(flat.floor, "More than One")
| TestSimple |
python | sqlalchemy__sqlalchemy | test/sql/test_insert.py | {
"start": 42889,
"end": 48353
} | class ____(
_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL
):
__dialect__ = postgresql.dialect(implicit_returning=True)
def test_insert_select(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = self.tables.myothertable.insert().from_select(
("otherid", "othername"), sel
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable "
"WHERE mytable.name = %(name_1)s",
checkparams={"name_1": "foo"},
)
def test_insert_select_return_defaults(self):
table1 = self.tables.mytable
sel = select(table1.c.myid, table1.c.name).where(
table1.c.name == "foo"
)
ins = (
self.tables.myothertable.insert()
.from_select(("otherid", "othername"), sel)
.return_defaults(self.tables.myothertable.c.otherid)
)
self.assert_compile(
ins,
"INSERT INTO myothertable (otherid, othername) "
"SELECT mytable.myid, mytable.name FROM mytable "
"WHERE mytable.name = %(name_1)s",
checkparams={"name_1": "foo"},
)
@testing.combinations(
True, False, argnames="insert_null_still_autoincrements"
)
@testing.combinations("values", "params", "nothing", argnames="paramtype")
def test_explicit_null_implicit_returning_still_renders(
self, paramtype, insert_null_still_autoincrements
):
"""test for future support of #7998 with RETURNING"""
t = Table(
"t",
MetaData(),
Column("x", Integer, primary_key=True),
Column("q", Integer),
)
dialect = postgresql.dialect()
dialect.insert_null_pk_still_autoincrements = (
insert_null_still_autoincrements
)
if paramtype == "values":
# for values present, we now have an extra check for this
stmt = t.insert().values(x=None, q=5)
if insert_null_still_autoincrements:
expected = (
"INSERT INTO t (x, q) VALUES (%(x)s, %(q)s) RETURNING t.x"
)
else:
expected = "INSERT INTO t (x, q) VALUES (%(x)s, %(q)s)"
params = None
elif paramtype == "params":
# for params, compiler doesnt have the value available to look
# at. we assume non-NULL
stmt = t.insert()
if insert_null_still_autoincrements:
expected = (
"INSERT INTO t (x, q) VALUES (%(x)s, %(q)s) RETURNING t.x"
)
else:
expected = "INSERT INTO t (x, q) VALUES (%(x)s, %(q)s)"
params = {"x": None, "q": 5}
elif paramtype == "nothing":
# no params, we assume full INSERT. this kind of compilation
# doesn't actually happen during execution since there are always
# parameters or values
stmt = t.insert()
expected = "INSERT INTO t (x, q) VALUES (%(x)s, %(q)s)"
params = None
self.assert_compile(stmt, expected, params=params, dialect=dialect)
def test_insert_multiple_values(self):
ins = self.tables.myothertable.insert().values(
[{"othername": "foo"}, {"othername": "bar"}]
)
self.assert_compile(
ins,
"INSERT INTO myothertable (othername) "
"VALUES (%(othername_m0)s), "
"(%(othername_m1)s)",
checkparams={"othername_m1": "bar", "othername_m0": "foo"},
)
def test_insert_multiple_values_literal_binds(self):
ins = self.tables.myothertable.insert().values(
[{"othername": "foo"}, {"othername": "bar"}]
)
self.assert_compile(
ins,
"INSERT INTO myothertable (othername) VALUES ('foo'), ('bar')",
checkparams={},
literal_binds=True,
)
def test_insert_multiple_values_return_defaults(self):
# TODO: not sure if this should raise an
# error or what
ins = (
self.tables.myothertable.insert()
.values([{"othername": "foo"}, {"othername": "bar"}])
.return_defaults(self.tables.myothertable.c.otherid)
)
self.assert_compile(
ins,
"INSERT INTO myothertable (othername) "
"VALUES (%(othername_m0)s), "
"(%(othername_m1)s)",
checkparams={"othername_m1": "bar", "othername_m0": "foo"},
)
def test_insert_single_list_values(self):
ins = self.tables.myothertable.insert().values([{"othername": "foo"}])
self.assert_compile(
ins,
"INSERT INTO myothertable (othername) "
"VALUES (%(othername_m0)s)",
checkparams={"othername_m0": "foo"},
)
def test_insert_single_element_values(self):
ins = self.tables.myothertable.insert().values({"othername": "foo"})
self.assert_compile(
ins,
"INSERT INTO myothertable (othername) "
"VALUES (%(othername)s) RETURNING myothertable.otherid",
checkparams={"othername": "foo"},
)
| InsertImplicitReturningTest |
python | pytorch__pytorch | torch/distributed/checkpoint/metadata.py | {
"start": 4427,
"end": 5631
} | class ____:
"""This class represents a lookup key for items in a state dict or Metadata."""
fqn: str
"""Fully Qualified Name of the object"""
offset: Optional[torch.Size] = None
"""If the object is a tensor, offset into the tensor we're looking for"""
index: Optional[int] = field(hash=False, compare=False, default=None)
"""
Index hint when searching for tensor chunk to speedup lookups (optional)
A common representation of a sharded tensor is as a list of chunks so to
find the index in such a list you need to linear search it.
When constructing an instance of MetadataIndex that points to that list,
one can provide the index as a hint and it will be probed first before
the linear search and thus making it significantly faster.
"""
def __init__(
self,
fqn: str,
offset: Optional[Sequence[int]] = None,
index: Optional[int] = None,
):
# We must use object.__setattr__ due to frozen=True
object.__setattr__(self, "fqn", fqn)
object.__setattr__(self, "index", index)
if offset is not None:
object.__setattr__(self, "offset", torch.Size(offset))
| MetadataIndex |
python | huggingface__transformers | tests/models/pvt/test_image_processing_pvt.py | {
"start": 2709,
"end": 4211
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = PvtImageProcessor if is_vision_available() else None
fast_image_processing_class = PvtImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = PvtImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        # `from_dict` honours the dict values, but explicit kwargs win;
        # a bare int size is normalised to a square {height, width} dict.
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 18, "width": 18})
            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})
| PvtImageProcessingTest |
python | openai__openai-python | src/openai/types/beta/chatkit/chat_session_workflow_param.py | {
"start": 391,
"end": 1007
} | class ____(TypedDict, total=False):
id: Required[str]
"""Identifier for the workflow invoked by the session."""
state_variables: Dict[str, Union[str, bool, float]]
"""State variables forwarded to the workflow.
Keys may be up to 64 characters, values must be primitive types, and the map
defaults to an empty object.
"""
tracing: Tracing
"""Optional tracing overrides for the workflow invocation.
When omitted, tracing is enabled by default.
"""
version: str
"""Specific workflow version to run. Defaults to the latest deployed version."""
| ChatSessionWorkflowParam |
python | PrefectHQ__prefect | tests/server/models/test_flow_run_states.py | {
"start": 12121,
"end": 13089
} | class ____:
    async def test_delete_flow_run_state(self, flow_run, session):
        """Deleting an existing state returns True and removes the row."""
        # create a flow run to read
        flow_run_state = (
            await models.flow_runs.set_flow_run_state(
                session=session,
                flow_run_id=flow_run.id,
                state=Running(),
            )
        ).state
        assert await models.flow_run_states.delete_flow_run_state(
            session=session, flow_run_state_id=flow_run_state.id
        )
        # make sure the flow run state is deleted
        result = await models.flow_run_states.read_flow_run_state(
            session=session, flow_run_state_id=flow_run_state.id
        )
        assert result is None
    async def test_delete_flow_run_state_returns_false_if_does_not_exist(self, session):
        # Deleting an unknown id is a no-op reported as False, not an error.
        result = await models.flow_run_states.delete_flow_run_state(
            session=session, flow_run_state_id=uuid4()
        )
        assert not result
| TestDeleteFlowRunState |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_types.py | {
"start": 8716,
"end": 10774
} | class ____(fixtures.TestBase):
__requires__ = ("json_type",)
__only_on__ = "sqlite"
__backend__ = True
    @testing.requires.reflects_json_type
    def test_reflection(self, connection, metadata):
        # Round-trip the type through table reflection: the reflected column
        # must come back as the SQLite-specific JSON type with JSON affinity.
        Table("json_test", metadata, Column("foo", sqlite.JSON))
        metadata.create_all(connection)
        reflected = Table("json_test", MetaData(), autoload_with=connection)
        is_(reflected.c.foo.type._type_affinity, sqltypes.JSON)
        assert isinstance(reflected.c.foo.type, sqlite.JSON)
    def test_rudimentary_roundtrip(self, metadata, connection):
        # A nested dict/list structure must deserialize back intact.
        sqlite_json = Table("json_test", metadata, Column("foo", sqlite.JSON))
        metadata.create_all(connection)
        value = {"json": {"foo": "bar"}, "recs": ["one", "two"]}
        connection.execute(sqlite_json.insert(), dict(foo=value))
        eq_(connection.scalar(select(sqlite_json.c.foo)), value)
    def test_extract_subobject(self, connection, metadata):
        # Indexing the JSON column (foo["json"]) extracts the sub-document
        # server-side rather than returning the whole value.
        sqlite_json = Table("json_test", metadata, Column("foo", sqlite.JSON))
        metadata.create_all(connection)
        value = {"json": {"foo": "bar"}}
        connection.execute(sqlite_json.insert(), dict(foo=value))
        eq_(
            connection.scalar(select(sqlite_json.c.foo["json"])), value["json"]
        )
    def test_serializer_args(self, metadata):
        # Engine-level json_serializer / json_deserializer hooks must be used
        # for the round trip -- exactly one call in each direction.
        sqlite_json = Table("json_test", metadata, Column("foo", sqlite.JSON))
        data_element = {"foo": "bar"}
        js = mock.Mock(side_effect=json.dumps)
        jd = mock.Mock(side_effect=json.loads)
        engine = engines.testing_engine(
            options=dict(json_serializer=js, json_deserializer=jd)
        )
        metadata.create_all(engine)
        with engine.begin() as conn:
            conn.execute(sqlite_json.insert(), {"foo": data_element})
            row = conn.execute(select(sqlite_json.c.foo)).first()
            eq_(row, (data_element,))
            eq_(js.mock_calls, [mock.call(data_element)])
            eq_(jd.mock_calls, [mock.call(json.dumps(data_element))])
| JSONTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-of-two-numbers-in-an-array.py | {
"start": 1552,
"end": 2043
} | class ____(object):
def findMaximumXOR(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
for i in reversed(xrange(max(nums).bit_length())):
result <<= 1
prefixes = set()
for n in nums:
prefixes.add(n >> i)
for p in prefixes:
if (result | 1) ^ p in prefixes:
result |= 1
break
return result
| Solution2 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 5757,
"end": 11263
} | class ____(graphene.ObjectType):
    # Overall status plus the three component statuses; each *Metadata
    # field carries the detail behind the corresponding status.
    assetHealth = graphene.NonNull(GrapheneAssetHealthStatus)
    materializationStatus = graphene.NonNull(GrapheneAssetHealthStatus)
    materializationStatusMetadata = graphene.Field(GrapheneAssetHealthMaterializationMeta)
    assetChecksStatus = graphene.NonNull(GrapheneAssetHealthStatus)
    assetChecksStatusMetadata = graphene.Field(GrapheneAssetHealthCheckMeta)
    freshnessStatus = graphene.NonNull(GrapheneAssetHealthStatus)
    freshnessStatusMetadata = graphene.Field(GrapheneAssetHealthFreshnessMeta)
    class Meta:
        name = "AssetHealth"
    def __init__(self, asset_key: GrapheneAssetKey, dynamic_partitions_loader):
        super().__init__()
        self._asset_key = asset_key
        self._dynamic_partitions_loader = dynamic_partitions_loader
        # Lazily created asyncio tasks shared across resolvers so each
        # underlying status query runs at most once per request.
        self.materialization_status_task = None
        self.asset_check_status_task = None
        self.freshness_status_task = None
    async def resolve_materializationStatus(self, graphene_info: ResolveInfo) -> AssetHealthStatus:
        # Start the shared query task once; awaiting an already-finished
        # task just returns its cached result.
        if self.materialization_status_task is None:
            self.materialization_status_task = asyncio.create_task(
                get_materialization_status_and_metadata(graphene_info.context, self._asset_key)
            )
        materialization_status, _ = await self.materialization_status_task
        return materialization_status
    async def resolve_materializationStatusMetadata(
        self, graphene_info: ResolveInfo
    ) -> GrapheneAssetHealthMaterializationMeta:
        # Reuse (or lazily start) the shared task; only the metadata half
        # of its (status, metadata) result is used here.
        if self.materialization_status_task is None:
            self.materialization_status_task = asyncio.create_task(
                get_materialization_status_and_metadata(graphene_info.context, self._asset_key)
            )
        _, materialization_status_metadata = await self.materialization_status_task
        return (
            GrapheneAssetHealthMaterializationMeta.from_metadata_class(
                materialization_status_metadata
            )
            if materialization_status_metadata
            else None
        )
    async def resolve_assetChecksStatus(self, graphene_info: ResolveInfo) -> AssetHealthStatus:
        # Same lazy shared-task pattern as the materialization resolvers.
        if self.asset_check_status_task is None:
            self.asset_check_status_task = asyncio.create_task(
                get_asset_check_status_and_metadata(graphene_info.context, self._asset_key)
            )
        asset_checks_status, _ = await self.asset_check_status_task
        return asset_checks_status
    async def resolve_assetChecksStatusMetadata(
        self, graphene_info: ResolveInfo
    ) -> GrapheneAssetHealthCheckMeta:
        # Metadata half of the shared asset-check query; None when the
        # query produced no metadata.
        if self.asset_check_status_task is None:
            self.asset_check_status_task = asyncio.create_task(
                get_asset_check_status_and_metadata(graphene_info.context, self._asset_key)
            )
        _, asset_checks_status_metadata = await self.asset_check_status_task
        return (
            GrapheneAssetHealthCheckMeta.from_metadata_class(asset_checks_status_metadata)
            if asset_checks_status_metadata
            else None
        )
    async def resolve_freshnessStatus(self, graphene_info: ResolveInfo) -> AssetHealthStatus:
        # Same lazy shared-task pattern as the other component resolvers.
        if self.freshness_status_task is None:
            self.freshness_status_task = asyncio.create_task(
                get_freshness_status_and_metadata(graphene_info.context, self._asset_key)
            )
        freshness_status, _ = await self.freshness_status_task
        return freshness_status
    async def resolve_freshnessStatusMetadata(
        self, graphene_info: ResolveInfo
    ) -> GrapheneAssetHealthFreshnessMeta:
        # Metadata half of the shared freshness query; only the last
        # materialized timestamp is surfaced.
        if self.freshness_status_task is None:
            self.freshness_status_task = asyncio.create_task(
                get_freshness_status_and_metadata(graphene_info.context, self._asset_key)
            )
        _, freshness_status_metadata = await self.freshness_status_task
        return (
            GrapheneAssetHealthFreshnessMeta(
                lastMaterializedTimestamp=freshness_status_metadata.last_materialized_timestamp
            )
            if freshness_status_metadata
            else None
        )
    async def resolve_assetHealth(self, graphene_info: ResolveInfo) -> AssetHealthStatus:
        """Combine the three component statuses into one overall status."""
        if not graphene_info.context.instance.dagster_asset_health_queries_supported():
            return AssetHealthStatus.UNKNOWN
        # Ensure all three shared tasks exist, then await each; tasks
        # already started by sibling resolvers are simply reused.
        if self.materialization_status_task is None:
            self.materialization_status_task = asyncio.create_task(
                get_materialization_status_and_metadata(graphene_info.context, self._asset_key)
            )
        materialization_status, _ = await self.materialization_status_task
        if self.asset_check_status_task is None:
            self.asset_check_status_task = asyncio.create_task(
                get_asset_check_status_and_metadata(graphene_info.context, self._asset_key)
            )
        asset_checks_status, _ = await self.asset_check_status_task
        if self.freshness_status_task is None:
            self.freshness_status_task = asyncio.create_task(
                get_freshness_status_and_metadata(graphene_info.context, self._asset_key)
            )
        freshness_status, _ = await self.freshness_status_task
        return overall_status_from_component_statuses(
            asset_checks_status=asset_checks_status,
            materialization_status=materialization_status,
            freshness_status=freshness_status,
        )
| GrapheneAssetHealth |
python | matplotlib__matplotlib | lib/matplotlib/offsetbox.py | {
"start": 29447,
"end": 36920
} | class ____(OffsetBox):
"""
An OffsetBox placed according to location *loc*.
AnchoredOffsetbox has a single child. When multiple children are needed,
use an extra OffsetBox to enclose them. By default, the offset box is
anchored against its parent Axes. You may explicitly specify the
*bbox_to_anchor*.
"""
zorder = 5 # zorder of the legend
# Location codes
codes = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
    def __init__(self, loc, *,
                 pad=0.4, borderpad=0.5,
                 child=None, prop=None, frameon=True,
                 bbox_to_anchor=None,
                 bbox_transform=None,
                 **kwargs):
        """
        Parameters
        ----------
        loc : str
            The box location. Valid locations are
            'upper left', 'upper center', 'upper right',
            'center left', 'center', 'center right',
            'lower left', 'lower center', 'lower right'.
            For backward compatibility, numeric values are accepted as well.
            See the parameter *loc* of `.Legend` for details.
        pad : float, default: 0.4
            Padding around the child as fraction of the fontsize.
        borderpad : float or (float, float), default: 0.5
            Padding between the offsetbox frame and the *bbox_to_anchor*.
            If a float, the same padding is used for both x and y.
            If a tuple of two floats, it specifies the (x, y) padding.
            .. versionadded:: 3.11
                The *borderpad* parameter now accepts a tuple of (x, y) paddings.
        child : `.OffsetBox`
            The box that will be anchored.
        prop : `.FontProperties`
            This is only used as a reference for paddings. If not given,
            :rc:`legend.fontsize` is used.
        frameon : bool
            Whether to draw a frame around the box.
        bbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats
            Box that is used to position the legend in conjunction with *loc*.
        bbox_transform : None or :class:`matplotlib.transforms.Transform`
            The transform for the bounding box (*bbox_to_anchor*).
        **kwargs
            All other parameters are passed on to `.OffsetBox`.
        Notes
        -----
        See `.Legend` for a detailed description of the anchoring mechanism.
        """
        super().__init__(**kwargs)
        self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
        self.set_child(child)
        # String locations are translated to the numeric codes used internally.
        if isinstance(loc, str):
            loc = _api.check_getitem(self.codes, loc=loc)
        self.loc = loc
        self.borderpad = borderpad
        self.pad = pad
        if prop is None:
            self.prop = FontProperties(size=mpl.rcParams["legend.fontsize"])
        else:
            self.prop = FontProperties._from_any(prop)
            # A dict prop without an explicit size falls back to the rc default.
            if isinstance(prop, dict) and "size" not in prop:
                self.prop.set_size(mpl.rcParams["legend.fontsize"])
        # Background/frame patch; its bounds are refitted on every draw.
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True,
            visible=frameon,
            boxstyle="square,pad=0",
        )
    def set_child(self, child):
        """Set the child to be anchored."""
        self._child = child
        # Propagate our Axes so the child draws into the same coordinate system.
        if child is not None:
            child.axes = self.axes
        self.stale = True
    def get_child(self):
        """Return the child."""
        return self._child
    def get_children(self):
        """Return the list of children (always the single anchored child)."""
        return [self._child]
    def get_bbox(self, renderer):
        # docstring inherited
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        # pad is expressed as a fraction of the fontsize; convert to pixels.
        pad = self.pad * fontsize
        return self.get_child().get_bbox(renderer).padded(pad)
    def get_bbox_to_anchor(self):
        """Return the bbox that the box is anchored to."""
        # Default to the parent Axes' bbox when no anchor bbox was set.
        if self._bbox_to_anchor is None:
            return self.axes.bbox
        else:
            transform = self._bbox_to_anchor_transform
            if transform is None:
                return self._bbox_to_anchor
            else:
                return TransformedBbox(self._bbox_to_anchor, transform)
    def set_bbox_to_anchor(self, bbox, transform=None):
        """
        Set the bbox that the box is anchored to.
        *bbox* can be a Bbox instance, a list of [left, bottom, width,
        height], or a list of [left, bottom] where the width and
        height will be assumed to be zero. The bbox will be
        transformed to display coordinate by the given transform.
        """
        if bbox is None or isinstance(bbox, BboxBase):
            self._bbox_to_anchor = bbox
        else:
            try:
                l = len(bbox)
            except TypeError as err:
                raise ValueError(f"Invalid bbox: {bbox}") from err
            # A bare (left, bottom) point becomes a zero-size bbox.
            if l == 2:
                bbox = [bbox[0], bbox[1], 0, 0]
            self._bbox_to_anchor = Bbox.from_bounds(*bbox)
        self._bbox_to_anchor_transform = transform
        self.stale = True
    @_compat_get_offset
    def get_offset(self, bbox, renderer):
        # docstring inherited
        fontsize_in_pixels = renderer.points_to_pixels(self.prop.get_size_in_points())
        # borderpad may be a scalar or an (x, y) pair; normalize to a pair.
        try:
            borderpad_x, borderpad_y = self.borderpad
        except TypeError:
            borderpad_x = self.borderpad
            borderpad_y = self.borderpad
        pad_x_pixels = borderpad_x * fontsize_in_pixels
        pad_y_pixels = borderpad_y * fontsize_in_pixels
        bbox_to_anchor = self.get_bbox_to_anchor()
        x0, y0 = _get_anchored_bbox(
            self.loc,
            Bbox.from_bounds(0, 0, bbox.width, bbox.height),
            bbox_to_anchor,
            pad_x_pixels,
            pad_y_pixels
        )
        # Translate so the child's own bbox origin lands on the anchor point.
        return x0 - bbox.x0, y0 - bbox.y0
    def update_frame(self, bbox, fontsize=None):
        """Resize the frame patch to *bbox*, rescaling its box style if *fontsize*."""
        self.patch.set_bounds(bbox.bounds)
        if fontsize:
            self.patch.set_mutation_scale(fontsize)
    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible():
            return
        # update the location and size of the legend
        bbox = self.get_window_extent(renderer)
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        self.update_frame(bbox, fontsize)
        # Frame first, then the child, so the child is drawn on top of it.
        self.patch.draw(renderer)
        px, py = self.get_offset(self.get_bbox(renderer), renderer)
        self.get_child().set_offset((px, py))
        self.get_child().draw(renderer)
        self.stale = False
def _get_anchored_bbox(loc, bbox, parentbbox, pad_x, pad_y):
    """
    Return the (x, y) position of the *bbox* anchored at the *parentbbox* with
    the *loc* code with the *borderpad* and padding *pad_x*, *pad_y*.
    """
    # This is only called internally and *loc* should already have been
    # validated. If 0 (None), we just let ``bbox.anchored`` raise.
    anchor_codes = (None, "NE", "NW", "SW", "SE", "E", "W", "E", "S", "N", "C")
    anchor = anchor_codes[loc]
    shrunk_parent = parentbbox.padded(-pad_x, -pad_y)
    return bbox.anchored(anchor, container=shrunk_parent).p0
| AnchoredOffsetbox |
python | google__jax | docs/autodidax.py | {
"start": 32434,
"end": 32891
} | class ____(NamedTuple):
in_binders: list[Var]
eqns: list[JaxprEqn]
outs: list[Atom]
def __hash__(self): return id(self)
__eq__ = op.is_
def raise_to_shaped(aval):
    """Return a ShapedArray with the same shape and dtype as *aval*."""
    return ShapedArray(aval.shape, aval.dtype)
# -
# Type-checking a jaxpr involves checking that there are no unbound variables,
# that variables are only bound once, and that for each equation the type of
# the primitive application matches the type of the output binders.
# +
| Jaxpr |
python | doocs__leetcode | solution/2300-2399/2345.Finding the Number of Visible Mountains/Solution.py | {
"start": 0,
"end": 400
} | class ____:
def visibleMountains(self, peaks: List[List[int]]) -> int:
arr = [(x - y, x + y) for x, y in peaks]
cnt = Counter(arr)
arr.sort(key=lambda x: (x[0], -x[1]))
ans, cur = 0, -inf
for l, r in arr:
if r <= cur:
continue
cur = r
if cnt[(l, r)] == 1:
ans += 1
return ans
| Solution |
python | doocs__leetcode | solution/3200-3299/3275.K-th Nearest Obstacle Queries/Solution.py | {
"start": 0,
"end": 337
} | class ____:
def resultsArray(self, queries: List[List[int]], k: int) -> List[int]:
ans = []
pq = []
for i, (x, y) in enumerate(queries):
heappush(pq, -(abs(x) + abs(y)))
if i >= k:
heappop(pq)
ans.append(-pq[0] if i >= k - 1 else -1)
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-freshdesk/unit_tests/integration/test_tickets.py | {
"start": 2888,
"end": 5374
} | class ____(TestCase):
    @HttpMocker()
    def test_when_read_then_extract_records(self, http_mocker: HttpMocker) -> None:
        # A single mocked page with two records must yield two records.
        http_mocker.get(
            HttpRequest(
                f"https://{_DOMAIN}/api/v2/tickets?order_type=asc&order_by=updated_at&include=description,requester,stats&per_page=100&updated_since=2022-01-01T00%3A00%3A00Z"
            ),
            _response().with_record(_record()).with_record(_record()).build(),
        )
        output = read(ConfigBuilder().domain(_DOMAIN).start_date(datetime(2022, 1, 1)), StateBuilder())
        assert len(output.records) == 2
    @HttpMocker()
    def test_given_hitting_300th_page_when_read_then_reset_pagination(self, http_mocker: HttpMocker) -> None:
        # The API rejects page 301 with a 400; the stream is expected to
        # restart pagination from the latest cursor value (updated_since
        # 2023-01-01) and pick up the remaining two records.
        http_mocker.get(
            HttpRequest(
                f"https://{_DOMAIN}/api/v2/tickets?order_type=asc&order_by=updated_at&include=description,requester,stats&per_page=100&updated_since=2022-01-01T00%3A00%3A00Z"
            ),
            self._a_response_with_full_page("2023-01-01T00:00:00Z"),
        )
        for page in range(2, 301):
            http_mocker.get(
                HttpRequest(
                    f"https://{_DOMAIN}/api/v2/tickets?order_type=asc&order_by=updated_at&include=description,requester,stats&page={page}&per_page=100&updated_since=2022-01-01T00%3A00%3A00Z"
                ),
                self._a_response_with_full_page("2023-01-01T00:00:00Z"),
            )
        http_mocker.get(
            HttpRequest(
                f"https://{_DOMAIN}/api/v2/tickets?order_type=asc&order_by=updated_at&include=description,requester,stats&page=301&per_page=100&updated_since=2022-01-01T00%3A00%3A00Z"
            ),
            HttpResponse(PAGE_LIMIT_300TH_REACHED, 400),
        )
        http_mocker.get(
            HttpRequest(
                f"https://{_DOMAIN}/api/v2/tickets?order_type=asc&order_by=updated_at&include=description,requester,stats&per_page=100&updated_since=2023-01-01T00%3A00%3A00Z"
            ),
            _response().with_record(_record()).with_record(_record()).build(),
        )
        output = read(ConfigBuilder().domain(_DOMAIN).start_date(datetime(2022, 1, 1)), StateBuilder())
        assert len(output.records) == 300 * 100 + 2
def _a_response_with_full_page(self, cursor_value: str) -> HttpResponse:
response = _response()
for x in range(100):
response.with_record(_record().with_cursor(cursor_value))
return response.build()
| TicketsTest |
python | PrefectHQ__prefect | tests/cli/test_work_queues.py | {
"start": 11124,
"end": 13532
} | class ____:
    async def test_resume(self, prefect_client, work_queue):
        # Pause first, then resume by name and verify the flag was cleared.
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"work-queue pause {work_queue.name} --pool default-agent-pool",
        )
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"work-queue resume {work_queue.name}",
            expected_code=0,
        )
        q = await read_queue(prefect_client, work_queue.name)
        assert not q.is_paused
    async def test_resume_by_id(self, prefect_client, work_queue):
        # The resume command accepts the queue UUID as well as its name.
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"work-queue pause {work_queue.name} --pool default-agent-pool",
        )
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=f"work-queue resume {work_queue.id}",
            expected_code=0,
        )
        q = await read_queue(prefect_client, work_queue.name)
        assert not q.is_paused
    async def test_resume_with_pool(
        self,
        prefect_client,
        work_queue_1,
    ):
        # Resuming with an explicit -p/--pool targets the queue inside
        # that pool rather than the default pool.
        pool_name = work_queue_1.work_pool.name
        await prefect_client.update_work_queue(
            id=work_queue_1.id,
            is_paused=True,
        )
        work_pool_queue = await read_queue(
            prefect_client,
            name=work_queue_1.name,
            pool=pool_name,
        )
        assert work_pool_queue.is_paused
        cmd = f"work-queue resume {work_queue_1.name} -p {pool_name}"
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=cmd,
            expected_code=0,
        )
        q = await read_queue(prefect_client, work_queue_1.name, pool=pool_name)
        assert not q.is_paused
    # Tests for all of the above, but with bad inputs
    def test_resume_bad_queue_name(self):
        # Unknown queue name exits non-zero.
        invoke_and_assert(
            command="work-queue resume bad-name",
            expected_code=1,
        )
    def test_resume_bad_queue_id(self):
        # A well-formed but nonexistent UUID also exits non-zero.
        invoke_and_assert(
            command="work-queue resume 00000000-0000-0000-0000-000000000000",
            expected_code=1,
        )
    def test_resume_bad_pool_name(
        self,
        work_queue,
    ):
        # Valid queue but unknown pool: the command must fail.
        invoke_and_assert(
            command=f"work-queue resume {work_queue.name} -p bad-pool",
            expected_code=1,
        )
| TestResumeWorkQueue |
python | milvus-io__pymilvus | tests/test_connections.py | {
"start": 7595,
"end": 12942
} | class ____:
    # Valid host/port combinations; extra keys must be tolerated.
    @pytest.fixture(scope="function", params=[
        {"host": "localhost", "port": "19530"},
        {"host": "localhost", "port": "19531"},
        {"host": "localhost", "port": "19530", "random": "useless"},
    ])
    def host_port(self, request):
        return request.param
    # Hosts of the wrong type (None / int / float) -- all must be rejected.
    @pytest.fixture(scope="function", params=[
        {"host": None, "port": "19530"},
        {"host": 1, "port": "19531"},
        {"host": 1.0, "port": "19530", "random": "useless"},
    ])
    def invalid_host(self, request):
        return request.param
    # Ports of the wrong type (None / float / bytes) -- all must be rejected.
    @pytest.fixture(scope="function", params=[
        {"host": "localhost", "port": None},
        {"host": "localhost", "port": 1.0},
        {"host": "localhost", "port": b'19530', "random": "useless"},
    ])
    def invalid_port(self, request):
        return request.param
    def test_add_connection_no_error(self, host_port):
        # Valid host/port dicts register an alias whose address is "host:port".
        add_connection = connections.add_connection
        add_connection(test=host_port)
        assert connections.get_connection_addr("test").get("address") == f"{host_port['host']}:{host_port['port']}"
        connections.remove_connection("test")
    def test_add_connection_no_error_with_user(self):
        # The optional "user" key must survive into the stored config,
        # for a custom alias and for the "default" alias alike.
        add_connection = connections.add_connection
        host_port = {"host": "localhost", "port": "19530", "user": "_user"}
        add_connection(test=host_port)
        config = connections.get_connection_addr("test")
        assert config.get("address") == f"{host_port['host']}:{host_port['port']}"
        assert config.get("user") == host_port['user']
        add_connection(default=host_port)
        config = connections.get_connection_addr("default")
        assert config.get("address") == f"{host_port['host']}:{host_port['port']}"
        assert config.get("user") == host_port['user']
        connections.remove_connection("test")
        connections.disconnect("default")
    def test_add_connection_raise_HostType(self, invalid_host):
        # Non-string hosts raise MilvusException with an explanatory message.
        add_connection = connections.add_connection
        with pytest.raises(MilvusException) as excinfo:
            add_connection(test=invalid_host)
        LOGGER.info(f"Exception info: {excinfo.value}")
        assert "Type of 'host' must be str." in excinfo.value.message
        assert excinfo.value.code == ErrorCode.UNEXPECTED_ERROR
    def test_add_connection_raise_PortType(self, invalid_port):
        # Non-string ports raise MilvusException with an explanatory message.
        add_connection = connections.add_connection
        with pytest.raises(MilvusException) as excinfo:
            add_connection(test=invalid_port)
        LOGGER.info(f"Exception info: {excinfo.value}")
        assert "Type of 'port' must be str" in excinfo.value.message
        assert excinfo.value.code == ErrorCode.UNEXPECTED_ERROR
    @pytest.mark.parametrize("valid_addr", [
        {"address": "127.0.0.1:19530"},
        {"address": "example.com:19530"},
    ])
    def test_add_connection_address(self, valid_addr):
        # A "host:port" address string is stored verbatim under the alias.
        alias = self.test_add_connection_address.__name__
        config = {alias: valid_addr}
        connections.add_connection(**config)
        addr = connections.get_connection_addr(alias)
        assert addr.get("address") == valid_addr.get("address")
        LOGGER.info(f"addr: {addr}")
        with mock.patch(f"{mock_prefix}.close", return_value=None):
            connections.remove_connection(alias)
    @pytest.mark.parametrize("invalid_addr", [
        {"address": "127.0.0.1"},
        {"address": "19530"},
    ])
    def test_add_connection_address_invalid(self, invalid_addr):
        # Addresses missing either the host or the port part are rejected.
        alias = self.test_add_connection_address_invalid.__name__
        config = {alias: invalid_addr}
        with pytest.raises(MilvusException) as excinfo:
            connections.add_connection(**config)
        LOGGER.info(f"Exception info: {excinfo.value}")
        assert "Illegal address" in excinfo.value.message
        assert excinfo.value.code == ErrorCode.UNEXPECTED_ERROR
    @pytest.mark.parametrize("valid_uri", [
        {"uri": "http://127.0.0.1:19530"},
        {"uri": "http://localhost:19530"},
        {"uri": "http://example.com:80"},
        {"uri": "http://example.com"},
    ])
    def test_add_connection_uri(self, valid_uri):
        # URIs are parsed into host:port; missing pieces fall back to defaults.
        alias = self.test_add_connection_uri.__name__
        config = {alias: valid_uri}
        connections.add_connection(**config)
        addr = connections.get_connection_addr(alias)
        LOGGER.info(f"addr: {addr}")
        host, port = addr["address"].split(':')
        assert host in valid_uri['uri'] or host in DefaultConfig.DEFAULT_HOST
        assert port in valid_uri['uri'] or port in DefaultConfig.DEFAULT_PORT
        LOGGER.info(f"host: {host}, port: {port}")
        with mock.patch(f"{mock_prefix}.close", return_value=None):
            connections.remove_connection(alias)
    @pytest.mark.parametrize("invalid_uri", [
        {"uri": "http://"},
        {"uri": None},
        {"uri": -1},
    ])
    def test_add_connection_uri_invalid(self, invalid_uri):
        # Empty, None, or non-string URIs are rejected with "Illegal uri".
        alias = self.test_add_connection_uri_invalid.__name__
        config = {alias: invalid_uri}
        with pytest.raises(MilvusException) as excinfo:
            connections.add_connection(**config)
        LOGGER.info(f"Exception info: {excinfo.value}")
        assert "Illegal uri" in excinfo.value.message
        assert excinfo.value.code == ErrorCode.UNEXPECTED_ERROR
| TestAddConnection |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 1361,
"end": 1470
} | class ____(ReprModelForm):
    # ModelForm configured over every field of the FileFields model.
    class Meta:
        model = FileFields
        fields = "__all__"
| FileFieldsForm |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_validation.py | {
"start": 6176,
"end": 7677
} | class ____:
"""Not the SearchStrategy type you were looking for."""
def check_type_(*args):
    # Thin forwarding wrapper around check_type; presumably exists so the
    # assertion fires from a frame outside the internal module -- confirm.
    return check_type(*args)
def test_check_type_suggests_check_strategy():
    # Checking against the real SearchStrategy type must point the caller
    # at check_strategy instead; the decoy class above must not trigger it.
    check_type_(SearchStrategy, SearchStrategy(), "this is OK")
    with pytest.raises(AssertionError, match="use check_strategy instead"):
        check_type_(ActualSearchStrategy, None, "SearchStrategy assertion")
def check_strategy_(*args):
    # Thin forwarding wrapper around check_strategy (same pattern as
    # check_type_ above).
    return check_strategy(*args)
def test_check_strategy_might_suggest_sampled_from():
    # The sampled_from hint should appear only for list/tuple arguments,
    # not for arbitrary non-strategy values.
    with pytest.raises(InvalidArgument) as excinfo:
        check_strategy_("not a strategy")
    assert "sampled_from" not in str(excinfo.value)
    with pytest.raises(InvalidArgument, match="such as st\\.sampled_from"):
        check_strategy_([1, 2, 3])
    with pytest.raises(InvalidArgument, match="such as st\\.sampled_from"):
        check_strategy_((1, 2, 3))
    check_strategy_(integers(), "passes for our custom coverage check")
@pytest.mark.parametrize("codec", ["ascii", "utf-8"])
def test_warn_on_strings_matching_common_codecs(codec):
    # Passing a codec name as the alphabet looks like a mistaken attempt
    # to select an encoding, so a HypothesisWarning must be raised.
    with pytest.warns(
        HypothesisWarning,
        match=f"it seems like you are trying to use the codec {codec!r}",
    ):
        @given(st.text(codec))
        def f(s):
            pass
        f()
    # if we reorder, it doesn't warn anymore
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        @given(st.text(codec[1:] + codec[:1]))
        def f(s):
            pass
        f()
| SearchStrategy |
python | numpy__numpy | tools/swig/test/testMatrix.py | {
"start": 12585,
"end": 12850
} | class ____(MatrixTestCase):
    def __init__(self, methodName="runTest"):
        MatrixTestCase.__init__(self, methodName)
        # Run the shared MatrixTestCase suite for the C "float" type
        # (typecode 'f').
        self.typeStr = "float"
        self.typeCode = "f"
######################################################################
| floatTestCase |
python | modin-project__modin | modin/config/envvars.py | {
"start": 40778,
"end": 41114
} | class ____(EnvironmentVariable, type=ExactStr):
"""
The module to use that will be used for docstrings.
The value set here must be a valid, importable module. It should have
a `DataFrame`, `Series`, and/or several APIs directly (e.g. `read_csv`).
"""
varname = "MODIN_DOC_MODULE"
default = "pandas"
| DocModule |
python | wandb__wandb | wandb/vendor/pygments/lexers/eiffel.py | {
"start": 423,
"end": 2482
} | class ____(RegexLexer):
"""
For `Eiffel <http://www.eiffel.com>`_ source code.
.. versionadded:: 2.0
"""
name = 'Eiffel'
aliases = ['eiffel']
filenames = ['*.e']
mimetypes = ['text/x-eiffel']
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
# Please note that keyword and operator are case insensitive.
(r'(?i)(true|false|void|current|result|precursor)\b', Keyword.Constant),
(r'(?i)(and(\s+then)?|not|xor|implies|or(\s+else)?)\b', Operator.Word),
(words((
'across', 'agent', 'alias', 'all', 'as', 'assign', 'attached',
'attribute', 'check', 'class', 'convert', 'create', 'debug',
'deferred', 'detachable', 'do', 'else', 'elseif', 'end', 'ensure',
'expanded', 'export', 'external', 'feature', 'from', 'frozen', 'if',
'inherit', 'inspect', 'invariant', 'like', 'local', 'loop', 'none',
'note', 'obsolete', 'old', 'once', 'only', 'redefine', 'rename',
'require', 'rescue', 'retry', 'select', 'separate', 'then',
'undefine', 'until', 'variant', 'when'), prefix=r'(?i)\b', suffix=r'\b'),
Keyword.Reserved),
(r'"\[(([^\]%]|\n)|%(.|\n)|\][^"])*?\]"', String),
(r'"([^"%\n]|%.)*?"', String),
include('numbers'),
(r"'([^'%]|%'|%%)'", String.Char),
(r"(//|\\\\|>=|<=|:=|/=|~|/~|[\\?!#%&@|+/\-=>*$<^\[\]])", Operator),
(r"([{}():;,.])", Punctuation),
(r'([a-z]\w*)|([A-Z][A-Z0-9_]*[a-z]\w*)', Name),
(r'([A-Z][A-Z0-9_]*)', Name.Class),
(r'\n+', Text),
],
'numbers': [
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'0[bB][01]+', Number.Bin),
(r'0[cC][0-7]+', Number.Oct),
(r'([0-9]+\.[0-9]*)|([0-9]*\.[0-9]+)', Number.Float),
(r'[0-9]+', Number.Integer),
],
}
| EiffelLexer |
python | huggingface__transformers | src/transformers/models/vjepa2/modeling_vjepa2.py | {
"start": 19857,
"end": 22550
} | class ____(nn.Module):
"""
Construct mask token, position and patch embeddings.
"""
    def __init__(self, config: VJEPA2Config):
        super().__init__()
        self.config = config
        # Projects encoder hidden states into the predictor's width.
        self.predictor_embeddings = nn.Linear(config.hidden_size, config.pred_hidden_size)
        self.num_mask_tokens = 0
        self.zero_init_mask_tokens = config.pred_zero_init_mask_tokens
        self.num_mask_tokens = config.pred_num_mask_tokens
        # One learnable (zero-initialised) token per mask slot; broadcast
        # over batch and sequence in forward().
        self.mask_tokens = nn.Parameter(torch.zeros(self.num_mask_tokens, 1, 1, config.pred_hidden_size))
        self.patch_size = config.patch_size
        self.config = config
@staticmethod
def num_patches(config):
if config.frames_per_clip > 1:
return (
(config.frames_per_clip // config.tubelet_size)
* (config.crop_size // config.patch_size)
* (config.crop_size // config.patch_size)
)
else:
return (config.crop_size // config.patch_size) * (config.crop_size // config.patch_size)
    def forward(
        self,
        hidden_states: torch.Tensor,
        context_mask: list[torch.Tensor],
        target_mask: list[torch.Tensor],
        mask_index: int = 1,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        hidden_states : encoder outputs (context)
        context_mask: tokens of the context (outputs from the encoder)
        target_mask: tokens to predict
        mask_index: index of the target mask to choose (useful for multiclip?)
        """
        # assumes hidden_states is (batch, tokens, hidden_size) -- TODO confirm
        B = hidden_states.size(0)
        context = self.predictor_embeddings(hidden_states)
        # Make target tokens
        mask_index = mask_index % self.num_mask_tokens
        target = self.mask_tokens[mask_index]
        # Note: this is problematic if the config isn't initialized with the right frames_per_clip value,
        # e.g. for scenarios if we want to run predictor for more tokens than in the config.
        # target = target.repeat(B, self.num_patches(self.config), 1)
        # Remedy: use the provided target mask to get the max patch num
        max_patch_num = target_mask[0].max() + 1  # one extra to include the last patch
        target = target.repeat(B, max_patch_num, 1)
        target = apply_masks(target, target_mask)
        # Concatenate context & target tokens
        context = context.repeat(len(context_mask), 1, 1)
        embeddings = torch.cat([context, target], dim=1)
        # Positions of context & target tokens
        cm = torch.cat(context_mask, dim=0)
        tm = torch.cat(target_mask, dim=0)
        masks = torch.cat([cm, tm], dim=1)
        return embeddings, masks
| VJEPA2PredictorEmbeddings |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/rds.py | {
"start": 5788,
"end": 7420
} | class ____(RdsBaseSensor):
"""
Waits for an RDS instance or cluster to enter one of a number of states.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsDbSensor`
:param db_type: Type of the DB - either "instance" or "cluster" (default: 'instance')
:param db_identifier: The AWS identifier for the DB
:param target_statuses: Target status of DB
"""
template_fields: Sequence[str] = aws_template_fields(
"db_identifier",
"db_type",
"target_statuses",
)
def __init__(
self,
*,
db_identifier: str,
db_type: RdsDbType | str = RdsDbType.INSTANCE,
target_statuses: list[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.db_identifier = db_identifier
self.target_statuses = target_statuses or ["available"]
self.db_type = db_type
def poke(self, context: Context):
db_type = RdsDbType(self.db_type)
self.log.info(
"Poking for statuses : %s\nfor db instance %s", self.target_statuses, self.db_identifier
)
try:
if db_type == RdsDbType.INSTANCE:
state = self.hook.get_db_instance_state(self.db_identifier)
else:
state = self.hook.get_db_cluster_state(self.db_identifier)
except AirflowNotFoundException:
return False
return state in self.target_statuses
__all__ = [
"RdsExportTaskExistenceSensor",
"RdsDbSensor",
"RdsSnapshotExistenceSensor",
]
| RdsDbSensor |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_frame_info.py | {
"start": 1376,
"end": 6228
} | class ____:
def test_frame_filename_repr(self) -> None:
path = "getsentry/billing/tax/manager.py"
frame_info = create_frame_info({"filename": path})
expected = f"FrameInfo: {path} stack_root: {frame_info.stack_root}"
assert frame_info.__repr__() == expected
@pytest.mark.parametrize("filepath", UNSUPPORTED_FRAME_FILENAMES)
def test_raises_unsupported(self, filepath: str) -> None:
with pytest.raises(UnsupportedFrameInfo):
create_frame_info({"filename": filepath})
@pytest.mark.parametrize("filepath", LEGITIMATE_HTTP_FILENAMES)
def test_legitimate_http_filenames_accepted(self, filepath: str) -> None:
# These files contain "http" but should NOT be rejected
frame_info = create_frame_info({"filename": filepath})
assert frame_info.raw_path == filepath
def test_raises_no_extension(self) -> None:
for filepath in NO_EXTENSION_FRAME_FILENAMES:
with pytest.raises(NeedsExtension):
create_frame_info({"filename": filepath})
@pytest.mark.parametrize(
"frame, expected_exception",
[
pytest.param({}, MissingModuleOrAbsPath, id="no_module"),
pytest.param({"module": "foo"}, MissingModuleOrAbsPath, id="no_abs_path"),
pytest.param(
# Classes without declaring a package are placed in
# the unnamed package which cannot be imported.
# https://docs.oracle.com/javase/specs/jls/se8/html/jls-7.html#jls-7.4.2
{"module": "NoPackageName", "abs_path": "OtherActivity.java"},
DoesNotFollowJavaPackageNamingConvention,
id="unnamed_package",
),
],
)
def test_java_raises_exception(
self, frame: dict[str, Any], expected_exception: type[Exception]
) -> None:
with pytest.raises(expected_exception):
create_frame_info(frame, "java")
@pytest.mark.parametrize(
"frame, expected_stack_root, expected_normalized_path",
[
pytest.param(
{"module": "foo.bar.Baz$handle$1", "abs_path": "baz.java"},
"foo/bar/",
"foo/bar/baz.java",
id="dollar_symbol_in_module",
),
pytest.param(
{"module": "foo.bar.Baz", "abs_path": "baz.extra.java"},
"foo/bar/",
"foo/bar/baz.extra.java",
id="two_dots_in_abs_path",
),
pytest.param(
{"module": "foo.bar.Baz", "abs_path": "no_extension"},
"foo/bar/",
"foo/bar/Baz", # The path does not use the abs_path
id="invalid_abs_path_no_extension",
),
pytest.param(
{"module": "foo.bar.Baz", "abs_path": "foo$bar"},
"foo/bar/",
"foo/bar/Baz", # The path does not use the abs_path
id="invalid_abs_path_dollar_sign",
),
pytest.param(
{"module": "foo.Baz", "abs_path": "foo"},
"foo/", # Single-depth stack root
"foo/Baz",
id="granularity_1",
),
],
)
def test_java_valid_frames(
self, frame: dict[str, Any], expected_stack_root: str, expected_normalized_path: str
) -> None:
frame_info = create_frame_info(frame, "java")
assert frame_info.stack_root == expected_stack_root
assert frame_info.normalized_path == expected_normalized_path
@pytest.mark.parametrize(
"frame_filename, stack_root, normalized_path",
[
pytest.param(
"app:///utils/something.py",
"app:///utils",
"utils/something.py",
),
pytest.param(
"./app/utils/something.py",
"./app",
"app/utils/something.py",
),
pytest.param(
"../../../../../../packages/something.py",
"../../../../../../packages",
"packages/something.py",
),
pytest.param(
"app:///../services/something.py",
"app:///../services",
"services/something.py",
),
pytest.param(
"/it/handles/backslashes/baz.py",
"/it/",
"it/handles/backslashes/baz.py",
),
],
)
def test_straight_path_prefix(
self, frame_filename: str, stack_root: str, normalized_path: str
) -> None:
frame_info = create_frame_info({"filename": frame_filename})
assert frame_info.normalized_path == normalized_path
assert frame_info.stack_root == stack_root
| TestFrameInfo |
python | sanic-org__sanic | guide/webapp/display/plugins/hook.py | {
"start": 137,
"end": 1146
} | class ____(DirectivePlugin):
def __call__( # type: ignore
self, directive: RSTDirective, md: Markdown
) -> None:
if md.renderer.NAME == "html":
md.before_render_hooks.append(self._hook)
def _hook(self, md: Markdown, state: BlockState) -> None:
prev = None
for idx, token in enumerate(state.tokens):
for type_ in ("column", "tab"):
if token["type"] == type_:
maybe_next = (
state.tokens[idx + 1]
if idx + 1 < len(state.tokens)
else None
)
token.setdefault("attrs", {})
if prev and prev["type"] != type_:
token["attrs"]["first"] = True
if (
maybe_next and maybe_next["type"] != type_
) or not maybe_next:
token["attrs"]["last"] = True
prev = token
| Hook |
python | rq__rq | rq/worker_pool.py | {
"start": 850,
"end": 10295
} | class ____:
class Status(Enum):
IDLE = 1
STARTED = 2
STOPPED = 3
def __init__(
self,
queues: Iterable[Union[str, Queue]],
connection: Redis,
num_workers: int = 1,
worker_class: type[BaseWorker] = Worker,
serializer: 'Serializer' = DefaultSerializer,
job_class: type[Job] = Job,
queue_class: type[Queue] = Queue,
*args,
**kwargs,
):
self.num_workers: int = num_workers
self._workers: list[Worker] = []
setup_loghandlers('INFO', DEFAULT_LOGGING_DATE_FORMAT, DEFAULT_LOGGING_FORMAT, name=__name__)
self.log: logging.Logger = logging.getLogger(__name__)
# self.log: logging.Logger = logger
self._queue_names: list[str] = parse_names(queues)
self.connection = connection
self.name: str = uuid4().hex
self._burst: bool = True
self._sleep: int = 0
self.status: self.Status = self.Status.IDLE # type: ignore
self.worker_class: type[BaseWorker] = worker_class
self.serializer: Serializer = serializer
self.job_class: type[Job] = job_class
self.queue_class: type[Queue] = queue_class
# A dictionary of WorkerData keyed by worker name
self.worker_dict: dict[str, WorkerData] = {}
self._connection_class, self._pool_class, self._pool_kwargs = parse_connection(connection)
@property
def queues(self) -> list[Queue]:
"""Returns a list of Queue objects"""
return [self.queue_class(name, connection=self.connection) for name in self._queue_names]
@property
def number_of_active_workers(self) -> int:
"""Returns a list of Queue objects"""
return len(self.worker_dict)
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def request_stop(self, signum=None, frame=None):
"""Toggle self._stop_requested that's checked on every loop"""
self.log.info('Received SIGINT/SIGTERM, shutting down...')
self.status = self.Status.STOPPED
self.stop_workers()
def all_workers_have_stopped(self) -> bool:
"""Returns True if all workers have stopped."""
self.reap_workers()
# `bool(self.worker_dict)` sometimes returns True even if the dict is empty
return self.number_of_active_workers == 0
def reap_workers(self):
"""Removes dead workers from worker_dict"""
self.log.debug('Reaping dead workers')
worker_datas = list(self.worker_dict.values())
for data in worker_datas:
data.process.join(0.1)
if data.process.is_alive():
self.log.debug('Worker %s with pid %d is alive', data.name, data.pid)
else:
self.handle_dead_worker(data)
continue
# I'm still not sure why this is sometimes needed, temporarily commenting
# this out until I can figure it out.
# with contextlib.suppress(HorseMonitorTimeoutException):
# with UnixSignalDeathPenalty(1, HorseMonitorTimeoutException):
# try:
# # If wait4 returns, the process is dead
# os.wait4(data.process.pid, 0) # type: ignore
# self.handle_dead_worker(data)
# except ChildProcessError:
# # Process is dead
# self.handle_dead_worker(data)
# continue
def handle_dead_worker(self, worker_data: WorkerData):
"""
Handle a dead worker
"""
self.log.info('Worker %s with pid %d is dead', worker_data.name, worker_data.pid)
with contextlib.suppress(KeyError):
self.worker_dict.pop(worker_data.name)
def check_workers(self, respawn: bool = True) -> None:
"""
Check whether workers are still alive
"""
self.log.debug('Checking worker processes')
self.reap_workers()
# If we have less number of workers than num_workers,
# respawn the difference
if respawn and self.status != self.Status.STOPPED:
delta = self.num_workers - len(self.worker_dict)
if delta:
for i in range(delta):
self.start_worker(burst=self._burst, _sleep=self._sleep)
def get_worker_process(
self,
name: str,
burst: bool,
_sleep: float = 0,
logging_level: str = 'INFO',
) -> Process:
"""Returns the worker process"""
return Process(
target=run_worker,
args=(name, self._queue_names, self._connection_class, self._pool_class, self._pool_kwargs),
kwargs={
'_sleep': _sleep,
'burst': burst,
'logging_level': logging_level,
'worker_class': self.worker_class,
'job_class': self.job_class,
'serializer': self.serializer,
},
name=f'Worker {name} (WorkerPool {self.name})',
)
def start_worker(
self,
count: Optional[int] = None,
burst: bool = True,
_sleep: float = 0,
logging_level: str = 'INFO',
):
"""
Starts a worker and adds the data to worker_datas.
* sleep: waits for X seconds before creating worker, for testing purposes
"""
name = uuid4().hex
process = self.get_worker_process(name, burst=burst, _sleep=_sleep, logging_level=logging_level)
process.start()
worker_data = WorkerData(name=name, pid=process.pid, process=process) # type: ignore
self.worker_dict[name] = worker_data
self.log.debug('Spawned worker: %s with PID %d', name, process.pid)
def start_workers(self, burst: bool = True, _sleep: float = 0, logging_level: str = 'INFO'):
"""
Run the workers
* sleep: waits for X seconds before creating worker, only for testing purposes
"""
self.log.debug(f'Spawning {self.num_workers} workers')
for i in range(self.num_workers):
self.start_worker(i + 1, burst=burst, _sleep=_sleep, logging_level=logging_level)
def stop_worker(self, worker_data: WorkerData, sig=signal.SIGINT):
"""
Send stop signal to worker and catch "No such process" error if the worker is already dead.
"""
try:
os.kill(worker_data.pid, sig)
self.log.info('Sent shutdown command to worker with %s', worker_data.pid)
except OSError as e:
if e.errno == errno.ESRCH:
# "No such process" is fine with us
self.log.debug('Horse already dead')
else:
raise
def stop_workers(self):
"""Send SIGINT to all workers"""
self.log.info('Sending stop signal to %s workers', len(self.worker_dict))
worker_datas = list(self.worker_dict.values())
for worker_data in worker_datas:
self.stop_worker(worker_data)
def start(self, burst: bool = False, logging_level: str = 'INFO'):
self._burst = burst
respawn = not burst # Don't respawn workers if burst mode is on
setup_loghandlers(logging_level, DEFAULT_LOGGING_DATE_FORMAT, DEFAULT_LOGGING_FORMAT, name=__name__)
self.log.info(f'Starting worker pool {self.name} with pid %d...', os.getpid())
self.status = self.Status.STARTED
self.start_workers(burst=self._burst, logging_level=logging_level)
self._install_signal_handlers()
while True:
if self.status == self.Status.STOPPED:
if self.all_workers_have_stopped():
self.log.info('All workers stopped, exiting...')
break
else:
self.log.info('Waiting for workers to shutdown...')
time.sleep(1)
continue
else:
self.check_workers(respawn=respawn)
if burst and self.number_of_active_workers == 0:
self.log.info('All workers stopped, exiting...')
break
time.sleep(1)
def run_worker(
worker_name: str,
queue_names: Iterable[str],
connection_class,
connection_pool_class,
connection_pool_kwargs: dict,
worker_class: type[BaseWorker] = Worker,
serializer: 'Serializer' = DefaultSerializer,
job_class: type[Job] = Job,
queue_class: type[Queue] = Queue,
burst: bool = True,
logging_level: str = 'INFO',
_sleep: int = 0,
):
connection = connection_class(
connection_pool=ConnectionPool(connection_class=connection_pool_class, **connection_pool_kwargs)
)
queues = [queue_class(name, connection=connection) for name in queue_names]
worker = worker_class(
queues,
name=worker_name,
connection=connection,
serializer=serializer,
job_class=job_class,
queue_class=queue_class,
)
worker.log.info('Starting worker started with PID %s', os.getpid())
time.sleep(_sleep)
worker.work(burst=burst, with_scheduler=True, logging_level=logging_level)
| WorkerPool |
python | huggingface__transformers | src/transformers/models/umt5/modeling_umt5.py | {
"start": 4060,
"end": 5427
} | class ____(nn.Module):
def __init__(self, config: UMT5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->UMT5
| UMT5DenseGatedActDense |
python | huggingface__transformers | tests/quantization/gptq/test_gptq.py | {
"start": 13133,
"end": 13304
} | class ____(GPTQTestCUDA):
device_map = "auto"
use_exllama = True
@slow
@require_optimum
@require_gptq
@require_torch_gpu
@require_accelerate
| GPTQTestDeviceMapExllama |
python | coleifer__peewee | tests/fields.py | {
"start": 3181,
"end": 3567
} | class ____(ModelTestCase):
requires = [FloatModel]
def test_float_field(self):
f1 = FloatModel.create(value=1.23)
f2 = FloatModel.create(value=3.14, value_null=0.12)
query = FloatModel.select().order_by(FloatModel.id)
self.assertEqual([(f.value, f.value_null) for f in query],
[(1.23, None), (3.14, 0.12)])
| TestFloatField |
python | davidhalter__jedi | test/completion/recursion.py | {
"start": 213,
"end": 609
} | class ____():
def __init__(self):
self.recursive = [1, 3]
def annoying(self):
self.recursive = [self.recursive[0]]
def recurse(self):
self.recursive = [self.recursive[1]]
#? int()
X().recursive[0]
def to_list(iterable):
return list(set(iterable))
def recursion1(foo):
return to_list(to_list(foo)) + recursion1(foo)
#? int()
recursion1([1,2])[0]
| X |
python | apache__airflow | providers/celery/tests/unit/celery/executors/test_celery_executor.py | {
"start": 15301,
"end": 18608
} | class ____:
"""
A picklable object used to mock tasks sent to Celery. Can't use the mock library
here because it's not picklable.
"""
def apply_async(self, *args, **kwargs):
return 1
def _exit_gracefully(signum, _):
print(f"{os.getpid()} Exiting gracefully upon receiving signal {signum}")
sys.exit(signum)
@pytest.fixture
def register_signals():
"""
Register the same signals as scheduler does to test celery_executor to make sure it does not
hang.
"""
orig_sigint = orig_sigterm = orig_sigusr2 = signal.SIG_DFL
orig_sigint = signal.signal(signal.SIGINT, _exit_gracefully)
orig_sigterm = signal.signal(signal.SIGTERM, _exit_gracefully)
orig_sigusr2 = signal.signal(signal.SIGUSR2, _exit_gracefully)
yield
# Restore original signal handlers after test
signal.signal(signal.SIGINT, orig_sigint)
signal.signal(signal.SIGTERM, orig_sigterm)
signal.signal(signal.SIGUSR2, orig_sigusr2)
@pytest.mark.execution_timeout(200)
@pytest.mark.quarantined
def test_send_tasks_to_celery_hang(register_signals):
"""
Test that celery_executor does not hang after many runs.
"""
executor = celery_executor.CeleryExecutor()
task = MockTask()
task_tuples_to_send = [(None, None, None, task) for _ in range(26)]
for _ in range(250):
# This loop can hang on Linux if celery_executor does something wrong with
# multiprocessing.
results = executor._send_tasks_to_celery(task_tuples_to_send)
assert results == [(None, None, 1) for _ in task_tuples_to_send]
@conf_vars({("celery", "result_backend"): "rediss://test_user:test_password@localhost:6379/0"})
def test_celery_executor_with_no_recommended_result_backend(caplog):
import importlib
from airflow.providers.celery.executors.default_celery import log
with caplog.at_level(logging.WARNING, logger=log.name):
# reload celery conf to apply the new config
importlib.reload(default_celery)
assert "test_password" not in caplog.text
assert (
"You have configured a result_backend using the protocol `rediss`,"
" it is highly recommended to use an alternative result_backend (i.e. a database)."
) in caplog.text
@conf_vars({("celery_broker_transport_options", "sentinel_kwargs"): '{"service_name": "mymaster"}'})
def test_sentinel_kwargs_loaded_from_string():
import importlib
# reload celery conf to apply the new config
importlib.reload(default_celery)
assert default_celery.DEFAULT_CELERY_CONFIG["broker_transport_options"]["sentinel_kwargs"] == {
"service_name": "mymaster"
}
@conf_vars({("celery", "task_acks_late"): "False"})
def test_celery_task_acks_late_loaded_from_string():
import importlib
# reload celery conf to apply the new config
importlib.reload(default_celery)
assert default_celery.DEFAULT_CELERY_CONFIG["task_acks_late"] is False
@conf_vars({("celery", "extra_celery_config"): '{"worker_max_tasks_per_child": 10}'})
def test_celery_extra_celery_config_loaded_from_string():
import importlib
# reload celery conf to apply the new config
importlib.reload(default_celery)
assert default_celery.DEFAULT_CELERY_CONFIG["worker_max_tasks_per_child"] == 10
| MockTask |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 225648,
"end": 233134
} | class ____(TestCase):
exact_dtype = True
# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(self, inplace_variant):
@wraps(inplace_variant)
def _fn(t, *args, **kwargs):
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True,
check_batched_grad=None, check_batched_forward_grad=False):
assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad')
# NB: check_backward_ad does not affect gradgradcheck (always True)
if variant is None:
self.skipTest("Skipped! Variant not implemented.")
if not op.supports_dtype(dtype, torch.device(device).type):
self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}")
def is_inplace(variant):
if hasattr(variant, "__wrapped__"):
return variant.__wrapped__ is op.get_inplace()
return variant is op.get_inplace()
include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs,
small_inputs_only=TEST_WITH_SLOW_GRADCHECK)
for sample in samples:
if sample.broadcasts_input and is_inplace(variant):
continue
# Gradcheck expects tensors as its input, but autograd actually supports tensorlists
# and tensors passed as kwargs. The following creates a function that accepts just
# the tensors that require grad as varargs, and then recomposes them back into the
# original input.
# Creates gradcheck inputs by identifying tensors requiring grad
all_args = None
if is_iterable_of_tensors(sample.input):
all_args = chain(sample.input, sample.args, sample.kwargs.values())
else:
all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values())) # type: ignore[assignment]
gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad)) # type: ignore[union-attr]
# Verifies sample input tensors should have no grad
# This may happen if the same tensor is used in two different SampleInputs
for t in gradcheck_args:
self.assertIsNone(t.grad,
"A sampled input has a gradient before running autograd. "
"This usually means that (at least) one input tensor is reused "
"across different SampleInputs. "
"Please create a new tensor for each SampleInput.")
def _input_recomposition_helper(inputs, inp, input_idx):
if is_iterable_of_tensors(inp):
tensor_list = []
for x in inp:
if isinstance(x, torch.Tensor) and x.requires_grad:
tensor_list.append(inputs[input_idx])
input_idx = input_idx + 1
else:
tensor_list.append(x)
return tensor_list, input_idx
elif isinstance(inp, torch.Tensor) and inp.requires_grad:
return inputs[input_idx], input_idx + 1
else:
return inp, input_idx
def fn(*inputs):
# Puts inputs back into sample properly
positional_args = []
input_idx = 0
inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx)
positional_args.append(inp)
for x in sample.args:
inp, input_idx = _input_recomposition_helper(inputs, x, input_idx)
positional_args.append(inp)
# Recreates kwargs
kwargs = {}
for k, v in sample.kwargs.items():
inp, input_idx = _input_recomposition_helper(inputs, v, input_idx)
kwargs[k] = inp
output = op.gradcheck_wrapper(variant, *positional_args, **kwargs)
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
if check == 'gradcheck':
if check_batched_grad is None:
check_batched_grad = op.check_batched_grad
self.assertTrue(gradcheck(fn, gradcheck_args,
check_batched_grad=check_batched_grad,
check_grad_dtypes=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode,
check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad,
check_undefined_grad=True,
check_batched_forward_grad=check_batched_forward_grad))
elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'): # gradgrad check
self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck")
for gen_non_contig_grad_outputs in (False, True):
kwargs = {
"gen_non_contig_grad_outputs": gen_non_contig_grad_outputs,
"check_batched_grad": op.check_batched_gradgrad,
"check_grad_dtypes": True,
"nondet_tol": op.gradcheck_nondet_tol,
"fast_mode": op.gradcheck_fast_mode
}
if check == "fwgrad_bwgrad":
kwargs["check_fwd_over_rev"] = True
kwargs["check_rev_over_rev"] = False
kwargs["check_batched_grad"] = False
kwargs["check_undefined_grad"] = False
self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs))
else:
self.assertTrue(False, msg="Unknown check requested!")
def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True,
check_batched_grad=None, check_batched_forward_grad=False):
return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad,
check_batched_forward_grad=check_batched_forward_grad)
def _skip_helper(self, op, device, dtype):
if dtype not in op.supported_backward_dtypes(torch.device(device).type):
self.skipTest("Skipped! Op doesn't support autograd for this dtype.")
if not op.supports_autograd and not op.supports_forward_ad:
self.skipTest("Skipped! autograd not supported.")
# Base TestCase for NT tests; used to define common helpers, etc.
| TestGradients |
python | keras-team__keras | keras/src/ops/math.py | {
"start": 23942,
"end": 27382
} | class ____(Operation):
def __init__(
self,
sequence_length,
sequence_stride,
fft_length,
window="hann",
center=True,
*,
name=None,
):
super().__init__(name=name)
self.sequence_length = sequence_length
self.sequence_stride = sequence_stride
self.fft_length = fft_length
self.window = window
self.center = center
def compute_output_spec(self, x):
if x.shape[-1] is not None:
padded = 0 if self.center is False else (self.fft_length // 2) * 2
num_sequences = (
1
+ (x.shape[-1] + padded - self.fft_length)
// self.sequence_stride
)
else:
num_sequences = None
new_shape = x.shape[:-1] + (num_sequences, self.fft_length // 2 + 1)
return (
KerasTensor(shape=new_shape, dtype=x.dtype),
KerasTensor(shape=new_shape, dtype=x.dtype),
)
def call(self, x):
return backend.math.stft(
x,
sequence_length=self.sequence_length,
sequence_stride=self.sequence_stride,
fft_length=self.fft_length,
window=self.window,
center=self.center,
)
@keras_export("keras.ops.stft")
def stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
"""Short-Time Fourier Transform along the last axis of the input.
The STFT computes the Fourier transform of short overlapping windows of the
input. This giving frequency components of the signal as they change over
time.
Args:
x: Input tensor.
sequence_length: An integer representing the sequence length.
sequence_stride: An integer representing the sequence hop size.
fft_length: An integer representing the size of the FFT to apply. If not
specified, uses the smallest power of 2 enclosing `sequence_length`.
window: A string, a tensor of the window or `None`. If `window` is a
string, available values are `"hann"` and `"hamming"`. If `window`
is a tensor, it will be used directly as the window and its length
must be `sequence_length`. If `window` is `None`, no windowing is
used. Defaults to `"hann"`.
center: Whether to pad `x` on both sides so that the t-th sequence is
centered at time `t * sequence_stride`. Otherwise, the t-th sequence
begins at time `t * sequence_stride`. Defaults to `True`.
Returns:
A tuple containing two tensors - the real and imaginary parts of the
STFT output.
Example:
>>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
>>> stft(x, 3, 2, 3)
(array([[0.75, -0.375],
[3.75, -1.875],
[5.25, -2.625]]), array([[0.0, 0.64951905],
[0.0, 0.64951905],
[0.0, -0.64951905]]))
"""
if any_symbolic_tensors((x,)):
return STFT(
sequence_length=sequence_length,
sequence_stride=sequence_stride,
fft_length=fft_length,
window=window,
center=center,
).symbolic_call(x)
return backend.math.stft(
x,
sequence_length=sequence_length,
sequence_stride=sequence_stride,
fft_length=fft_length,
window=window,
center=center,
)
| STFT |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 17690,
"end": 21267
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: HunYuanMoEV1Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
# Diff from Llama - DynamicNTKAlphaRotary
if self.rope_type == "dynamic" and self.config.rope_parameters.get("alpha"):
self.dim = config.head_dim
base = self.config.rope_parameters["rope_theta"] * self.config.rope_parameters["alpha"] ** (
self.config.head_dim / (self.config.head_dim - 2)
)
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.config.head_dim))
self.attention_scaling = 1.0
else:
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[HunYuanMoEV1Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| HunYuanMoEV1RotaryEmbedding |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/glibc/package.py | {
"start": 298,
"end": 778
} | class ____(AutotoolsPackage, GNUMirrorPackage):
"""The GNU C Library provides many of the low-level components used
directly by programs written in the C or C++ languages.
"""
homepage = "https://www.gnu.org/software/libc/"
gnu_mirror_path = "libc/glibc-2.33.tar.gz"
git = "https://sourceware.org/git/glibc.git"
tags = ["runtime"]
provides("libc")
version("2.39", sha256="97f84f3b7588cd54093a6f6389b0c1a81e70d99708d74963a2e3eab7c7dc942d")
| Glibc |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 54712,
"end": 54809
} | class ____(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
| lapack_atlas_info |
python | django__django | django/contrib/postgres/search.py | {
"start": 7149,
"end": 8663
} | class ____(SearchQueryCombinable, Func):
    """SQL expression wrapping one of PostgreSQL's ``*to_tsquery`` functions.
    Supports negation via ``~`` (rendered in SQL as ``!!(...)``).
    """
    output_field = SearchQueryField()
    # Maps the search_type keyword to the PostgreSQL parsing function.
    SEARCH_TYPES = {
        "plain": "plainto_tsquery",
        "phrase": "phraseto_tsquery",
        "raw": "to_tsquery",
        "websearch": "websearch_to_tsquery",
    }
    def __init__(
        self,
        value,
        output_field=None,
        *,
        config=None,
        invert=False,
        search_type="plain",
    ):
        # Lexeme combinations already form tsquery syntax, so they must be
        # parsed by the raw to_tsquery function regardless of search_type.
        if isinstance(value, LexemeCombinable):
            search_type = "raw"
        self.function = self.SEARCH_TYPES.get(search_type)
        if self.function is None:
            raise ValueError("Unknown search_type argument '%s'." % search_type)
        # Wrap plain Python values so they participate in SQL compilation.
        if not hasattr(value, "resolve_expression"):
            value = Value(value)
        expressions = (value,)
        self.config = SearchConfig.from_parameter(config)
        # When a config is given it becomes the function's first argument.
        if self.config is not None:
            expressions = [self.config, *expressions]
        self.invert = invert
        super().__init__(*expressions, output_field=output_field)
    def as_sql(self, compiler, connection, function=None, template=None):
        sql, params = super().as_sql(compiler, connection, function, template)
        # "!!" negates a tsquery in PostgreSQL; wrap the whole expression.
        if self.invert:
            sql = "!!(%s)" % sql
        return sql, params
    def __invert__(self):
        # Return a negated copy; the original query object is untouched.
        clone = self.copy()
        clone.invert = not self.invert
        return clone
    def __str__(self):
        result = super().__str__()
        return ("~%s" % result) if self.invert else result
| SearchQuery |
python | python__mypy | mypy/test/teststubgen.py | {
"start": 28594,
"end": 32997
} | class ____(DataSuite):
    """Data-driven end-to-end test cases that generate stub files.
    You can use these magic test case name suffixes:
    *_semanal
        Run semantic analysis (slow as this uses real stubs -- only use
        when necessary)
    *_import
        Import module and perform runtime introspection (in the current
        process!)
    You can use these magic comments:
    # flags: --some-stubgen-option ...
        Specify custom stubgen options
    # modules: module1 module2 ...
        Specify which modules to output (by default only 'main')
    """
    required_out_section = True
    base_path = "."
    files = ["stubgen.test"]
    @unittest.skipIf(sys.platform == "win32", "clean up fails on Windows")
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        # Make the current directory importable for the duration of the case.
        with local_sys_path_set():
            self.run_case_inner(testcase)
    def run_case_inner(self, testcase: DataDrivenTestCase) -> None:
        extra = [] # Extra command-line args
        mods = [] # Module names to process
        source = "\n".join(testcase.input)
        # Materialize every auxiliary file plus the main module on disk so
        # stubgen can process them like real sources.
        for file, content in testcase.files + [("./main.py", source)]:
            # Strip ./ prefix and .py suffix.
            mod = file[2:-3].replace("/", ".")
            if mod.endswith(".__init__"):
                mod, _, _ = mod.rpartition(".")
            mods.append(mod)
            if "-p " not in source:
                extra.extend(["-m", mod])
            with open(file, "w") as f:
                f.write(content)
        options = self.parse_flags(source, extra)
        if sys.version_info < options.pyversion:
            pytest.skip()
        modules = self.parse_modules(source)
        out_dir = "out"
        try:
            try:
                # Test-name suffixes select the analysis mode (see the
                # class docstring for *_import / *_semanal semantics).
                if testcase.name.endswith("_inspect"):
                    options.inspect = True
                else:
                    if not testcase.name.endswith("_import"):
                        options.no_import = True
                    if not testcase.name.endswith("_semanal"):
                        options.parse_only = True
                generate_stubs(options)
                a: list[str] = []
                for module in modules:
                    fnam = module_to_path(out_dir, module)
                    self.add_file(fnam, a, header=len(modules) > 1)
            except CompileError as e:
                # Compile errors become the expected output instead.
                a = e.messages
            assert_string_arrays_equal(
                testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
            )
        finally:
            # Drop imported test modules and the generated stub directory so
            # cases don't leak state into each other.
            for mod in mods:
                if mod in sys.modules:
                    del sys.modules[mod]
            shutil.rmtree(out_dir)
    def parse_flags(self, program_text: str, extra: list[str]) -> Options:
        """Build stubgen Options from a "# flags:" magic comment plus extras."""
        flags = re.search("# flags: (.*)$", program_text, flags=re.MULTILINE)
        pyversion = None
        if flags:
            flag_list = flags.group(1).split()
            # --python-version is handled here rather than by parse_options.
            for i, flag in enumerate(flag_list):
                if flag.startswith("--python-version="):
                    pyversion = flag.split("=", 1)[1]
                    del flag_list[i]
                    break
        else:
            flag_list = []
        options = parse_options(flag_list + extra)
        if pyversion:
            # A hack to allow testing old python versions with new language constructs
            # This should be rarely used in general as stubgen output should not be version-specific
            major, minor = pyversion.split(".", 1)
            options.pyversion = (int(major), int(minor))
        if "--verbose" not in flag_list:
            options.quiet = True
        else:
            options.verbose = True
        return options
    def parse_modules(self, program_text: str) -> list[str]:
        """Return module names from a "# modules:" comment (default: main)."""
        modules = re.search("# modules: (.*)$", program_text, flags=re.MULTILINE)
        if modules:
            return modules.group(1).split()
        else:
            return ["main"]
    def add_file(self, path: str, result: list[str], header: bool) -> None:
        """Append the generated stub at *path* to *result*, with an optional
        per-file header when several modules are emitted."""
        if not os.path.exists(path):
            result.append("<%s was not generated>" % path.replace("\\", "/"))
            return
        if header:
            result.append(f"# {path[4:]}")
        with open(path, encoding="utf8") as file:
            result.extend(file.read().splitlines())
self_arg = ArgSig(name="self")
| StubgenPythonSuite |
python | walkccc__LeetCode | solutions/644. Maximum Average Subarray II/644.py | {
"start": 0,
"end": 815
} | class ____:
def findMaxAverage(self, nums: list[int], k: int) -> float:
ERR = 1e-5
l = min(nums)
r = max(nums)
def check(m: float) -> bool:
"""
Returns True if there's a subarray, where its length >= k and its average
sum >= m.
"""
summ = 0
prevSum = 0
minPrevSum = 0
for i, num in enumerate(nums):
# Need to substract m for each `num` so that we can check if the sum of
# the subarray >= 0.
summ += num - m
if i >= k:
prevSum += nums[i - k] - m
minPrevSum = min(minPrevSum, prevSum)
if i + 1 >= k and summ >= minPrevSum:
return True
return False
while r - l > ERR:
m = (l + r) / 2
if check(m):
l = m
else:
r = m
return l
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/observers/ecs.py | {
"start": 1641,
"end": 1723
} | class ____(enum.Enum):
    """Two-way filter condition: a value is either PRESENT or ABSENT."""
    # auto() assigns sequential values 1 and 2.
    PRESENT = enum.auto()
    ABSENT = enum.auto()
| FilterCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.