language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | crytic__slither | slither/tools/upgradeability/checks/abstract_checks.py | {
"start": 983,
"end": 5050
} | class ____(metaclass=abc.ABCMeta):
ARGUMENT = ""
HELP = ""
IMPACT: CheckClassification = CheckClassification.UNIMPLEMENTED
WIKI = ""
WIKI_TITLE = ""
WIKI_DESCRIPTION = ""
WIKI_EXPLOIT_SCENARIO = ""
WIKI_RECOMMENDATION = ""
REQUIRE_CONTRACT = False
REQUIRE_PROXY = False
REQUIRE_CONTRACT_V2 = False
def __init__(
self,
logger: Logger,
contract: Contract,
proxy: Optional[Contract] = None,
contract_v2: Optional[Contract] = None,
) -> None:
self.logger = logger
self.contract = contract
self.proxy = proxy
self.contract_v2 = contract_v2
if not self.ARGUMENT:
raise IncorrectCheckInitialization(f"NAME is not initialized {self.__class__.__name__}")
if not self.HELP:
raise IncorrectCheckInitialization(f"HELP is not initialized {self.__class__.__name__}")
if not self.WIKI:
raise IncorrectCheckInitialization(f"WIKI is not initialized {self.__class__.__name__}")
if not self.WIKI_TITLE:
raise IncorrectCheckInitialization(
f"WIKI_TITLE is not initialized {self.__class__.__name__}"
)
if not self.WIKI_DESCRIPTION:
raise IncorrectCheckInitialization(
f"WIKI_DESCRIPTION is not initialized {self.__class__.__name__}"
)
if not self.WIKI_EXPLOIT_SCENARIO and self.IMPACT not in [
CheckClassification.INFORMATIONAL
]:
raise IncorrectCheckInitialization(
f"WIKI_EXPLOIT_SCENARIO is not initialized {self.__class__.__name__}"
)
if not self.WIKI_RECOMMENDATION:
raise IncorrectCheckInitialization(
f"WIKI_RECOMMENDATION is not initialized {self.__class__.__name__}"
)
if self.REQUIRE_PROXY and self.REQUIRE_CONTRACT_V2:
# This is not a fundatemenal issues
# But it requires to change __main__ to avoid running two times the detectors
txt = f"REQUIRE_PROXY and REQUIRE_CONTRACT_V2 needs change in __main___ {self.__class__.__name__}"
raise IncorrectCheckInitialization(txt)
if self.IMPACT not in [
CheckClassification.LOW,
CheckClassification.MEDIUM,
CheckClassification.HIGH,
CheckClassification.INFORMATIONAL,
]:
raise IncorrectCheckInitialization(
f"IMPACT is not initialized {self.__class__.__name__}"
)
if self.REQUIRE_CONTRACT_V2 and contract_v2 is None:
raise IncorrectCheckInitialization(
f"ContractV2 is not initialized {self.__class__.__name__}"
)
if self.REQUIRE_PROXY and proxy is None:
raise IncorrectCheckInitialization(
f"Proxy is not initialized {self.__class__.__name__}"
)
@abc.abstractmethod
def _check(self) -> List[Output]:
"""TODO Documentation"""
return []
def check(self) -> List[Dict]:
all_outputs = self._check()
# Keep only dictionaries
all_results = [r.data for r in all_outputs]
if all_results:
if self.logger:
info = "\n"
for result in all_results:
info += result["description"]
info += f"Reference: {self.WIKI}"
self._log(info)
return all_results
def generate_result(
self,
info: CHECK_INFO,
additional_fields: Optional[Dict] = None,
) -> Output:
output = Output(
info, additional_fields, markdown_root=self.contract.compilation_unit.core.markdown_root
)
output.data["check"] = self.ARGUMENT
return output
def _log(self, info: str) -> None:
if self.logger:
self.logger.info(self.color(info))
@property
def color(self) -> Callable[[str], str]:
return classification_colors[self.IMPACT]
| AbstractCheck |
python | Lightning-AI__lightning | src/lightning/pytorch/cli.py | {
"start": 13848,
"end": 40146
} | class ____:
"""Implementation of a configurable command line tool for pytorch-lightning."""
def __init__(
self,
model_class: Optional[Union[type[LightningModule], Callable[..., LightningModule]]] = None,
datamodule_class: Optional[Union[type[LightningDataModule], Callable[..., LightningDataModule]]] = None,
save_config_callback: Optional[type[SaveConfigCallback]] = SaveConfigCallback,
save_config_kwargs: Optional[dict[str, Any]] = None,
trainer_class: Union[type[Trainer], Callable[..., Trainer]] = Trainer,
trainer_defaults: Optional[dict[str, Any]] = None,
seed_everything_default: Union[bool, int] = True,
parser_kwargs: Optional[Union[dict[str, Any], dict[str, dict[str, Any]]]] = None,
parser_class: type[LightningArgumentParser] = LightningArgumentParser,
subclass_mode_model: bool = False,
subclass_mode_data: bool = False,
args: ArgsType = None,
run: bool = True,
auto_configure_optimizers: bool = True,
load_from_checkpoint_support: bool = True,
) -> None:
"""Receives as input pytorch-lightning classes (or callables which return pytorch-lightning classes), which are
called / instantiated using a parsed configuration file and / or command line args.
Parsing of configuration from environment variables can be enabled by setting ``parser_kwargs={"default_env":
True}``. A full configuration yaml would be parsed from ``PL_CONFIG`` if set. Individual settings are so parsed
from variables named for example ``PL_TRAINER__MAX_EPOCHS``.
For more info, read :ref:`the CLI docs <lightning-cli>`.
Args:
model_class: An optional :class:`~lightning.pytorch.core.LightningModule` class to train on or a
callable which returns a :class:`~lightning.pytorch.core.LightningModule` instance when
called. If ``None``, you can pass a registered model with ``--model=MyModel``.
datamodule_class: An optional :class:`~lightning.pytorch.core.datamodule.LightningDataModule` class or a
callable which returns a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` instance when
called. If ``None``, you can pass a registered datamodule with ``--data=MyDataModule``.
save_config_callback: A callback class to save the config.
save_config_kwargs: Parameters that will be used to instantiate the save_config_callback.
trainer_class: An optional subclass of the :class:`~lightning.pytorch.trainer.trainer.Trainer` class or a
callable which returns a :class:`~lightning.pytorch.trainer.trainer.Trainer` instance when called.
trainer_defaults: Set to override Trainer defaults or add persistent callbacks. The callbacks added through
this argument will not be configurable from a configuration file and will always be present for
this particular CLI. Alternatively, configurable callbacks can be added as explained in
:ref:`the CLI docs <lightning-cli>`.
seed_everything_default: Number for the :func:`~lightning.fabric.utilities.seed.seed_everything`
seed value. Set to True to automatically choose a seed value.
Setting it to False will avoid calling ``seed_everything``.
parser_kwargs: Additional arguments to instantiate each ``LightningArgumentParser``.
subclass_mode_model: Whether model can be any `subclass
<https://jsonargparse.readthedocs.io/en/stable/#class-type-and-sub-classes>`_
of the given class.
subclass_mode_data: Whether datamodule can be any `subclass
<https://jsonargparse.readthedocs.io/en/stable/#class-type-and-sub-classes>`_
of the given class.
args: Arguments to parse. If ``None`` the arguments are taken from ``sys.argv``. Command line style
arguments can be given in a ``list``. Alternatively, structured config options can be given in a
``dict`` or ``jsonargparse.Namespace``.
run: Whether subcommands should be added to run a :class:`~lightning.pytorch.trainer.trainer.Trainer`
method. If set to ``False``, the trainer and model classes will be instantiated only.
auto_configure_optimizers: Whether to automatically add default optimizer and lr_scheduler arguments.
load_from_checkpoint_support: Whether ``save_hyperparameters`` should save the original parsed
hyperparameters (instead of what ``__init__`` receives), such that it is possible for
``load_from_checkpoint`` to correctly instantiate classes even when using complex nesting and
dependency injection.
"""
self.save_config_callback = save_config_callback
self.save_config_kwargs = save_config_kwargs or {}
self.trainer_class = trainer_class
self.trainer_defaults = trainer_defaults or {}
self.seed_everything_default = seed_everything_default
self.parser_kwargs = parser_kwargs or {}
self.parser_class = parser_class
self.auto_configure_optimizers = auto_configure_optimizers
self.model_class = model_class
# used to differentiate between the original value and the processed value
self._model_class = model_class or LightningModule
self.subclass_mode_model = (model_class is None) or subclass_mode_model
self.datamodule_class = datamodule_class
# used to differentiate between the original value and the processed value
self._datamodule_class = datamodule_class or LightningDataModule
self.subclass_mode_data = (datamodule_class is None) or subclass_mode_data
main_kwargs, subparser_kwargs = self._setup_parser_kwargs(self.parser_kwargs)
self.setup_parser(run, main_kwargs, subparser_kwargs)
self.parse_arguments(self.parser, args)
self._parse_ckpt_path()
self.subcommand = self.config["subcommand"] if run else None
self._set_seed()
if load_from_checkpoint_support:
self._add_instantiators()
self.before_instantiate_classes()
self.instantiate_classes()
self.after_instantiate_classes()
if self.subcommand is not None:
self._run_subcommand(self.subcommand)
def _setup_parser_kwargs(self, parser_kwargs: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
subcommand_names = self.subcommands().keys()
main_kwargs = {k: v for k, v in parser_kwargs.items() if k not in subcommand_names}
subparser_kwargs = {k: v for k, v in parser_kwargs.items() if k in subcommand_names}
return main_kwargs, subparser_kwargs
def init_parser(self, **kwargs: Any) -> LightningArgumentParser:
"""Method that instantiates the argument parser."""
kwargs.setdefault("dump_header", [f"lightning.pytorch=={pl.__version__}"])
parser = self.parser_class(**kwargs)
parser.add_argument(
"-c", "--config", action=ActionConfigFile, help="Path to a configuration file in json or yaml format."
)
return parser
def setup_parser(
self, add_subcommands: bool, main_kwargs: dict[str, Any], subparser_kwargs: dict[str, Any]
) -> None:
"""Initialize and setup the parser, subcommands, and arguments."""
self.parser = self.init_parser(**main_kwargs)
if add_subcommands:
self._subcommand_method_arguments: dict[str, list[str]] = {}
self._add_subcommands(self.parser, **subparser_kwargs)
else:
self._add_arguments(self.parser)
def add_default_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Adds default arguments to the parser."""
parser.add_argument(
"--seed_everything",
type=Union[bool, int],
default=self.seed_everything_default,
help=(
"Set to an int to run seed_everything with this value before classes instantiation."
"Set to True to use a random seed."
),
)
def add_core_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Adds arguments from the core classes to the parser."""
parser.add_lightning_class_args(self.trainer_class, "trainer")
trainer_defaults = {"trainer." + k: v for k, v in self.trainer_defaults.items() if k != "callbacks"}
parser.set_defaults(trainer_defaults)
parser.add_lightning_class_args(self._model_class, "model", subclass_mode=self.subclass_mode_model)
if self.datamodule_class is not None:
parser.add_lightning_class_args(self._datamodule_class, "data", subclass_mode=self.subclass_mode_data)
else:
# this should not be required because the user might want to use the `LightningModule` dataloaders
parser.add_lightning_class_args(
self._datamodule_class, "data", subclass_mode=self.subclass_mode_data, required=False
)
def _add_arguments(self, parser: LightningArgumentParser) -> None:
# default + core + custom arguments
self.add_default_arguments_to_parser(parser)
self.add_core_arguments_to_parser(parser)
self.add_arguments_to_parser(parser)
# add default optimizer args if necessary
if self.auto_configure_optimizers:
if not parser._optimizers: # already added by the user in `add_arguments_to_parser`
parser.add_optimizer_args((Optimizer,))
if not parser._lr_schedulers: # already added by the user in `add_arguments_to_parser`
parser.add_lr_scheduler_args(LRSchedulerTypeTuple)
self.link_optimizers_and_lr_schedulers(parser)
def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Implement to add extra arguments to the parser or link arguments.
Args:
parser: The parser object to which arguments can be added
"""
@staticmethod
def subcommands() -> dict[str, set[str]]:
"""Defines the list of available subcommands and the arguments to skip."""
return {
"fit": {"model", "train_dataloaders", "val_dataloaders", "datamodule"},
"validate": {"model", "dataloaders", "datamodule"},
"test": {"model", "dataloaders", "datamodule"},
"predict": {"model", "dataloaders", "datamodule"},
}
def _add_subcommands(self, parser: LightningArgumentParser, **kwargs: Any) -> None:
"""Adds subcommands to the input parser."""
self._subcommand_parsers: dict[str, LightningArgumentParser] = {}
parser_subcommands = parser.add_subcommands()
# the user might have passed a builder function
trainer_class = (
self.trainer_class if isinstance(self.trainer_class, type) else class_from_function(self.trainer_class)
)
# register all subcommands in separate subcommand parsers under the main parser
for subcommand in self.subcommands():
fn = getattr(trainer_class, subcommand)
# extract the first line description in the docstring for the subcommand help message
description = _get_short_description(fn)
subparser_kwargs = kwargs.get(subcommand, {})
subparser_kwargs.setdefault("description", description)
subcommand_parser = self._prepare_subcommand_parser(trainer_class, subcommand, **subparser_kwargs)
self._subcommand_parsers[subcommand] = subcommand_parser
parser_subcommands.add_subcommand(subcommand, subcommand_parser, help=description)
def _prepare_subcommand_parser(self, klass: type, subcommand: str, **kwargs: Any) -> LightningArgumentParser:
parser = self.init_parser(**kwargs)
self._add_arguments(parser)
# subcommand arguments
skip: set[Union[str, int]] = set(self.subcommands()[subcommand])
added = parser.add_method_arguments(klass, subcommand, skip=skip)
# need to save which arguments were added to pass them to the method later
self._subcommand_method_arguments[subcommand] = added
return parser
@staticmethod
def link_optimizers_and_lr_schedulers(parser: LightningArgumentParser) -> None:
"""Creates argument links for optimizers and learning rate schedulers that specified a ``link_to``."""
optimizers_and_lr_schedulers = {**parser._optimizers, **parser._lr_schedulers}
for key, (class_type, link_to) in optimizers_and_lr_schedulers.items():
if link_to == "AUTOMATIC":
continue
if isinstance(class_type, tuple):
parser.link_arguments(key, link_to)
else:
add_class_path = _add_class_path_generator(class_type)
parser.link_arguments(key, link_to, compute_fn=add_class_path)
def parse_arguments(self, parser: LightningArgumentParser, args: ArgsType) -> None:
"""Parses command line arguments and stores it in ``self.config``."""
if args is not None and len(sys.argv) > 1:
rank_zero_warn(
"LightningCLI's args parameter is intended to run from within Python like if it were from the command "
"line. To prevent mistakes it is not recommended to provide both args and command line arguments, got: "
f"sys.argv[1:]={sys.argv[1:]}, args={args}."
)
if isinstance(args, (dict, Namespace)):
self.config = parser.parse_object(args)
else:
self.config = parser.parse_args(args)
def _parse_ckpt_path(self) -> None:
"""If a checkpoint path is given, parse the hyperparameters from the checkpoint and update the config."""
if not self.config.get("subcommand"):
return
ckpt_path = self.config[self.config.subcommand].get("ckpt_path")
if ckpt_path and Path(ckpt_path).is_file():
ckpt = torch.load(ckpt_path, weights_only=True, map_location="cpu")
hparams = ckpt.get("hyper_parameters", {})
hparams.pop("_instantiator", None)
if not hparams:
return
if "_class_path" in hparams:
hparams = {
"class_path": hparams.pop("_class_path"),
"dict_kwargs": hparams,
}
hparams = {self.config.subcommand: {"model": hparams}}
try:
self.config = self.parser.parse_object(hparams, self.config)
except SystemExit:
sys.stderr.write("Parsing of ckpt_path hyperparameters failed!\n")
raise
def _dump_config(self) -> None:
if hasattr(self, "config_dump"):
return
self.config_dump = yaml.safe_load(
self.parser.dump(self.config, skip_link_targets=False, skip_none=False, format="yaml")
)
if "subcommand" in self.config:
self.config_dump = self.config_dump[self.config.subcommand]
def _add_instantiators(self) -> None:
self.parser.add_instantiator(
_InstantiatorFn(cli=self, key="model"),
_get_module_type(self._model_class),
subclasses=self.subclass_mode_model,
)
self.parser.add_instantiator(
_InstantiatorFn(cli=self, key="data"),
_get_module_type(self._datamodule_class),
subclasses=self.subclass_mode_data,
)
def before_instantiate_classes(self) -> None:
"""Implement to run some code before instantiating the classes."""
def instantiate_classes(self) -> None:
"""Instantiates the classes and sets their attributes."""
self.config_init = self.parser.instantiate_classes(self.config)
self.datamodule = self._get(self.config_init, "data")
self.model = self._get(self.config_init, "model")
self._add_configure_optimizers_method_to_model(self.subcommand)
self.trainer = self.instantiate_trainer()
def after_instantiate_classes(self) -> None:
"""Implement to run some code after instantiating the classes."""
def instantiate_trainer(self, **kwargs: Any) -> Trainer:
"""Instantiates the trainer.
Args:
kwargs: Any custom trainer arguments.
"""
extra_callbacks = [self._get(self.config_init, c) for c in self._parser(self.subcommand).callback_keys]
trainer_config = {**self._get(self.config_init, "trainer", default={}), **kwargs}
return self._instantiate_trainer(trainer_config, extra_callbacks)
def _instantiate_trainer(self, config: dict[str, Any], callbacks: list[Callback]) -> Trainer:
key = "callbacks"
if key in config:
if config[key] is None:
config[key] = []
elif not isinstance(config[key], list):
config[key] = [config[key]]
config[key].extend(callbacks)
if key in self.trainer_defaults:
value = self.trainer_defaults[key]
config[key] += value if isinstance(value, list) else [value]
if self.save_config_callback and not config.get("fast_dev_run", False):
config_callback = self.save_config_callback(
self._parser(self.subcommand),
self.config.get(str(self.subcommand), self.config),
**self.save_config_kwargs,
)
config[key].append(config_callback)
else:
rank_zero_warn(
f"The `{self.trainer_class.__qualname__}` class does not expose the `{key}` argument so they will"
" not be included."
)
return self.trainer_class(**config)
def _parser(self, subcommand: Optional[str]) -> LightningArgumentParser:
if subcommand is None:
return self.parser
# return the subcommand parser for the subcommand passed
return self._subcommand_parsers[subcommand]
@staticmethod
def configure_optimizers(
lightning_module: LightningModule, optimizer: Optimizer, lr_scheduler: Optional[LRSchedulerTypeUnion] = None
) -> Any:
"""Override to customize the :meth:`~lightning.pytorch.core.LightningModule.configure_optimizers` method.
Args:
lightning_module: A reference to the model.
optimizer: The optimizer.
lr_scheduler: The learning rate scheduler (if used).
"""
if lr_scheduler is None:
return optimizer
if isinstance(lr_scheduler, ReduceLROnPlateau):
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": lr_scheduler, "monitor": lr_scheduler.monitor},
}
return [optimizer], [lr_scheduler]
def _add_configure_optimizers_method_to_model(self, subcommand: Optional[str]) -> None:
"""Overrides the model's :meth:`~lightning.pytorch.core.LightningModule.configure_optimizers` method if a
single optimizer and optionally a scheduler argument groups are added to the parser as 'AUTOMATIC'."""
if not self.auto_configure_optimizers:
return
parser = self._parser(subcommand)
def get_automatic(
class_type: Union[type, tuple[type, ...]], register: dict[str, tuple[Union[type, tuple[type, ...]], str]]
) -> list[str]:
automatic = []
for key, (base_class, link_to) in register.items():
if not isinstance(base_class, tuple):
base_class = (base_class,)
if link_to == "AUTOMATIC" and any(issubclass(c, class_type) for c in base_class):
automatic.append(key)
return automatic
optimizers = get_automatic(Optimizer, parser._optimizers)
lr_schedulers = get_automatic(LRSchedulerTypeTuple, parser._lr_schedulers)
if len(optimizers) == 0:
return
if len(optimizers) > 1 or len(lr_schedulers) > 1:
raise MisconfigurationException(
f"`{self.__class__.__name__}.add_configure_optimizers_method_to_model` expects at most one optimizer "
f"and one lr_scheduler to be 'AUTOMATIC', but found {optimizers + lr_schedulers}. In this case the "
"user is expected to link the argument groups and implement `configure_optimizers`, see "
"https://lightning.ai/docs/pytorch/stable/common/lightning_cli.html"
"#optimizers-and-learning-rate-schedulers"
)
optimizer_class = parser._optimizers[optimizers[0]][0]
optimizer_init = self._get(self.config_init, optimizers[0])
if not isinstance(optimizer_class, tuple):
optimizer_init = _global_add_class_path(optimizer_class, optimizer_init)
if not optimizer_init:
# optimizers were registered automatically but not passed by the user
return
lr_scheduler_init = None
if lr_schedulers:
lr_scheduler_class = parser._lr_schedulers[lr_schedulers[0]][0]
lr_scheduler_init = self._get(self.config_init, lr_schedulers[0])
if not isinstance(lr_scheduler_class, tuple):
lr_scheduler_init = _global_add_class_path(lr_scheduler_class, lr_scheduler_init)
if is_overridden("configure_optimizers", self.model):
_warn(
f"`{self.model.__class__.__name__}.configure_optimizers` will be overridden by "
f"`{self.__class__.__name__}.configure_optimizers`."
)
optimizer = instantiate_class(self.model.parameters(), optimizer_init)
lr_scheduler = instantiate_class(optimizer, lr_scheduler_init) if lr_scheduler_init else None
fn = partial(self.configure_optimizers, optimizer=optimizer, lr_scheduler=lr_scheduler)
update_wrapper(fn, self.configure_optimizers) # necessary for `is_overridden`
# override the existing method
self.model.configure_optimizers = MethodType(fn, self.model)
def _get(self, config: Namespace, key: str, default: Optional[Any] = None) -> Any:
"""Utility to get a config value which might be inside a subcommand."""
return config.get(str(self.subcommand), config).get(key, default)
def _run_subcommand(self, subcommand: str) -> None:
"""Run the chosen subcommand."""
before_fn = getattr(self, f"before_{subcommand}", None)
if callable(before_fn):
before_fn()
default = getattr(self.trainer, subcommand)
fn = getattr(self, subcommand, default)
fn_kwargs = self._prepare_subcommand_kwargs(subcommand)
fn(**fn_kwargs)
after_fn = getattr(self, f"after_{subcommand}", None)
if callable(after_fn):
after_fn()
def _prepare_subcommand_kwargs(self, subcommand: str) -> dict[str, Any]:
"""Prepares the keyword arguments to pass to the subcommand to run."""
fn_kwargs = {
k: v for k, v in self.config_init[subcommand].items() if k in self._subcommand_method_arguments[subcommand]
}
fn_kwargs["model"] = self.model
if self.datamodule is not None:
fn_kwargs["datamodule"] = self.datamodule
return fn_kwargs
def _set_seed(self) -> None:
"""Sets the seed."""
config_seed = self._get(self.config, "seed_everything")
if config_seed is False:
return
if config_seed is True:
# user requested seeding, choose randomly
config_seed = seed_everything(workers=True)
else:
config_seed = seed_everything(config_seed, workers=True)
if self.subcommand:
self.config[self.subcommand]["seed_everything"] = config_seed
else:
self.config["seed_everything"] = config_seed
def _class_path_from_class(class_type: type) -> str:
return class_type.__module__ + "." + class_type.__name__
def _global_add_class_path(
class_type: type, init_args: Optional[Union[Namespace, dict[str, Any]]] = None
) -> dict[str, Any]:
if isinstance(init_args, Namespace):
init_args = init_args.as_dict()
return {"class_path": _class_path_from_class(class_type), "init_args": init_args or {}}
def _add_class_path_generator(class_type: type) -> Callable[[Namespace], dict[str, Any]]:
def add_class_path(init_args: Namespace) -> dict[str, Any]:
return _global_add_class_path(class_type, init_args)
return add_class_path
def instantiate_class(args: Union[Any, tuple[Any, ...]], init: dict[str, Any]) -> Any:
"""Instantiates a class with the given args and init.
Args:
args: Positional arguments required for instantiation.
init: Dict of the form {"class_path":...,"init_args":...}.
Returns:
The instantiated class object.
"""
kwargs = init.get("init_args", {})
if not isinstance(args, tuple):
args = (args,)
class_module, class_name = init["class_path"].rsplit(".", 1)
module = __import__(class_module, fromlist=[class_name])
args_class = getattr(module, class_name)
return args_class(*args, **kwargs)
def _get_short_description(component: object) -> Optional[str]:
if component.__doc__ is None:
return None
try:
docstring = docstring_parser.parse(component.__doc__)
return docstring.short_description
except (ValueError, docstring_parser.ParseError) as ex:
rank_zero_warn(f"Failed parsing docstring for {component}: {ex}")
def _get_module_type(value: Union[Callable, type]) -> type:
if callable(value) and not isinstance(value, type):
return inspect.signature(value).return_annotation
return value
def _set_dict_nested(data: dict, key: str, value: Any) -> None:
keys = key.split(".")
for k in keys[:-1]:
assert k in data, f"Expected key {key} to be in data"
data = data[k]
data[keys[-1]] = value
| LightningCLI |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 120781,
"end": 123170
} | class ____(torch.nn.Module):
def forward(
self,
primals_1: "Sym(s51)", # PlainAOTInput(idx=0)
primals_8: "Sym(s51)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=3), idx=0)
primals_10: "Sym(s55)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=3), idx=1)
tangents_1: "f64[s64, s55]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='_values')
tangents_2: "i64[s51 + 1]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='_offsets')
tangents_3: "f32[s0, 0]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='_min_seqlen_tensor')
tangents_4: "f32[s83, 0]", # SubclassGetAttrAOTInput(base=TangentAOTInput(output=PlainAOTOutput(idx=0)), attr='_max_seqlen_tensor')
):
mul_1: "f64[s64, s55]" = torch.ops.aten.mul.Tensor(tangents_1, primals_1); tangents_1 = primals_1 = None
return (
None, # None
None, # None
None, # None
mul_1, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), attr='_values')
tangents_2, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), attr='_offsets')
tangents_3, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), attr='_min_seqlen_tensor')
tangents_4, # SubclassGetAttrAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), attr='_max_seqlen_tensor')
primals_8, # SubclassSizeAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), idx=0)
primals_10, # SubclassSizeAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), idx=2)
primals_10, # SubclassStrideAOTOutput(base=GradAOTOutput(grad_of=PlainAOTInput(idx=3)), idx=1)
)
""", # noqa: B950
)
def test_njt_subclass_from_cat(self):
# create from an existing NJT
def f(nt):
y = nt.clone()
z = torch.cat([y, y], dim=-1)
return z
nt, _ = get_jagged_tensor(((2, 3, 4), 5), None, True)
fw, bw = self._compile_check(f, [(nt,)], dynamic=True, call_backward=True)
self.assertExpectedInline(
normalize_gm(fw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 16547,
"end": 17412
} | class ____(ConnectionResponse):
type: Literal["ConnectionResult"] = "ConnectionResult"
@classmethod
def from_conn_response(cls, connection_response: ConnectionResponse) -> ConnectionResult:
"""
Get ConnectionResult from ConnectionResponse.
ConnectionResponse is autogenerated from the API schema, so we need to convert it to ConnectionResult
for communication between the Supervisor and the task process.
"""
# Exclude defaults to avoid sending unnecessary data
# Pass the type as ConnectionResult explicitly so we can then call model_dump_json with exclude_unset=True
# to avoid sending unset fields (which are defaults in our case).
return cls(
**connection_response.model_dump(exclude_defaults=True, by_alias=True), type="ConnectionResult"
)
| ConnectionResult |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 82467,
"end": 83085
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.rand(3, 3, 3, 3)
self.bias = torch.rand(3)
self.stride = (1, 1)
self.padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
return F.conv2d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
| FunctionalConv2d |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py | {
"start": 46,
"end": 80
} | class ____(
object,
):
...
| A |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 34145,
"end": 37023
} | class ____(nn.Module):
"""Attention with rotary position encoding."""
def __init__(
self,
config: Sam2VideoConfig,
kv_in_dim: Optional[int] = None,
rope_k_repeat=False,
):
super().__init__()
self.config = config
self.hidden_size = config.memory_attention_hidden_size
self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate
self.num_attention_heads = config.memory_attention_num_attention_heads
self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.kv_in_dim = kv_in_dim if kv_in_dim is not None else self.hidden_size
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
self.rope_k_repeat = rope_k_repeat
self.dropout_p = config.memory_attention_rope_dropout
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
num_k_exclude_rope: int = 0,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tensor:
# Input projections
batch_size, point_batch_size = query.shape[:2]
new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
query = self.q_proj(query).view(*new_shape).transpose(1, 2)
key = self.k_proj(key).view(*new_shape).transpose(1, 2)
value = self.v_proj(value).view(*new_shape).transpose(1, 2)
cos, sin = position_embeddings
# Apply rotary position encoding, excluding some keys if specified
query, key = apply_rotary_pos_emb_2d(
query, key, cos, sin, repeat_freqs_k=self.rope_k_repeat, num_k_exclude_rope=num_k_exclude_rope
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=None,
dropout=0.0 if not self.training else self.dropout_p,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(
batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Sam2VideoRoPEAttention |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 68757,
"end": 71427
} | class ____(TreeTestCase):
def test_insert_ordered_DFS_backwards_root_nodes(self):
rock = OrderedInsertion.objects.create(name="Rock")
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
OrderedInsertion.objects.create(name="Classical")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
3 - 1 0 1 2
1 - 2 0 1 4
2 1 2 1 2 3
""",
)
def test_insert_ordered_BFS_backwards_root_nodes(self):
rock = OrderedInsertion.objects.create(name="Rock")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 2
""",
)
OrderedInsertion.objects.create(name="Classical")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
2 - 1 0 1 2
1 - 2 0 1 2
""",
)
# This tends to fail if it uses `rock.tree_id`, which is 1, although
# in the database Rock's tree_id has been updated to 2.
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
2 - 1 0 1 2
1 - 2 0 1 4
3 1 2 1 2 3
""",
)
def test_insert_ordered_DFS_backwards_nonroot_nodes(self):
music = OrderedInsertion.objects.create(name="music")
rock = OrderedInsertion.objects.create(name="Rock", parent=music)
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
OrderedInsertion.objects.create(name="Classical", parent=music)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
4 1 1 1 2 3
2 1 1 1 4 7
3 2 1 2 5 6
""",
)
def test_insert_ordered_BFS_backwards_nonroot_nodes(self):
music = OrderedInsertion.objects.create(name="music")
rock = OrderedInsertion.objects.create(name="Rock", parent=music)
OrderedInsertion.objects.create(name="Classical", parent=music)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
2 1 1 1 4 5
""",
)
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
3 1 1 1 2 3
2 1 1 1 4 7
4 2 1 2 5 6
""",
)
| TestOrderedInsertionBFS |
python | google__jax | tests/debugger_test.py | {
"start": 1505,
"end": 11379
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu", "gpu", "tpu"]):
self.skipTest(f"Host callback not supported on {jtu.device_under_test()}")
def test_debugger_eof(self):
stdin, stdout = make_fake_stdin_stdout([])
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
with self.assertRaises(SystemExit):
f(2.)
jax.effects_barrier()
def test_debugger_can_continue(self):
stdin, stdout = make_fake_stdin_stdout(["c"])
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
f(2.)
jax.effects_barrier()
expected = _format_multiline(r"""
Entering jdb:
(jdb) """)
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_can_print_value(self):
stdin, stdout = make_fake_stdin_stdout(["p x", "c"])
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
expected = _format_multiline(r"""
Entering jdb:
(jdb) Array(2., dtype=float32)
(jdb) """)
f(jnp.array(2., jnp.float32))
jax.effects_barrier()
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_can_print_value_in_jit(self):
stdin, stdout = make_fake_stdin_stdout(["p x", "c"])
@jax.jit
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
expected = _format_multiline(r"""
Entering jdb:
(jdb) Array(2., dtype=float32)
(jdb) """)
f(jnp.array(2., jnp.float32))
jax.effects_barrier()
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_can_print_multiple_values(self):
stdin, stdout = make_fake_stdin_stdout(["p x, y", "c"])
@jax.jit
def f(x):
y = x + 1.
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
expected = _format_multiline(r"""
Entering jdb:
(jdb) (Array(2., dtype=float32), Array(3., dtype=float32))
(jdb) """)
f(jnp.array(2., jnp.float32))
jax.effects_barrier()
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_can_print_context(self):
stdin, stdout = make_fake_stdin_stdout(["l", "c"])
@jax.jit
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
f(2.)
jax.effects_barrier()
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) > .*debugger_test\.py\([0-9]+\)
@jax\.jit
def f\(x\):
y = jnp\.sin\(x\)
-> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
return y
.*
\(jdb\) """)
self.assertRegex(stdout.getvalue(), expected)
def test_debugger_can_print_backtrace(self):
stdin, stdout = make_fake_stdin_stdout(["bt", "c"])
@jax.jit
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
expected = _format_multiline(r"""
Entering jdb:.*
\(jdb\) Traceback:.*
""")
f(2.)
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_debugger_can_work_with_multiple_stack_frames(self):
stdin, stdout = make_fake_stdin_stdout(["l", "u", "p x", "d", "c"])
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
@jax.jit
def g(x):
y = f(x)
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) > .*debugger_test\.py\([0-9]+\)
def f\(x\):
y = jnp\.sin\(x\)
-> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
return y
.*
\(jdb\) > .*debugger_test\.py\([0-9]+\).*
@jax\.jit
def g\(x\):
-> y = f\(x\)
return jnp\.exp\(y\)
.*
\(jdb\) Array\(2\., dtype=float32\)
\(jdb\) > .*debugger_test\.py\([0-9]+\)
def f\(x\):
y = jnp\.sin\(x\)
-> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
return y
.*
\(jdb\) """)
g(jnp.array(2., jnp.float32))
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_can_use_multiple_breakpoints(self):
stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
def f(x):
y = x + 1.
debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
backend="cli")
return y
@jax.jit
def g(x):
y = f(x) * 2.
debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
backend="cli")
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
(jdb) Array(3., dtype=float32)
(jdb) Entering jdb:
(jdb) Array(6., dtype=float32)
(jdb) """)
g(jnp.array(2., jnp.float32))
jax.effects_barrier()
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_works_with_vmap(self):
stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
def f(x):
y = x + 1.
debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
backend="cli")
return 2. * y
@jax.jit
@jax.vmap
def g(x):
y = f(x)
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
(jdb) Array(1., dtype=float32)
(jdb) Entering jdb:
(jdb) Array(2., dtype=float32)
(jdb) """)
g(jnp.arange(2., dtype=jnp.float32))
jax.effects_barrier()
self.assertEqual(stdout.getvalue(), expected)
def test_debugger_works_with_pmap(self):
if jax.local_device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices.")
stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
def f(x):
y = jnp.sin(x)
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
@jax.pmap
def g(x):
y = f(x)
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) Array\(.*, dtype=float32\)
\(jdb\) Entering jdb:
\(jdb\) Array\(.*, dtype=float32\)
\(jdb\) """)
g(jnp.arange(2., dtype=jnp.float32))
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_debugger_works_with_jit(self):
if jax.default_backend() != "tpu":
raise unittest.SkipTest("`pjit` doesn't work with CustomCall.")
stdin, stdout = make_fake_stdin_stdout(["p y", "c"])
def f(x):
y = x + 1
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
return y
def g(x):
y = f(x)
return jnp.exp(y)
g = jax.jit(
g,
in_shardings=jax.sharding.PartitionSpec("dev"),
out_shardings=jax.sharding.PartitionSpec("dev"),
)
arr = (1 + jnp.arange(8)).astype(np.int32)
arr2 = jnp.arange(8, dtype=jnp.int32)
with jax.set_mesh(jax.sharding.Mesh(np.array(jax.devices()), ["dev"])):
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) {}
\(jdb\) """.format(re.escape(repr(arr))))
g(arr2)
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_debugger_uses_local_before_global_scope(self):
stdin, stdout = make_fake_stdin_stdout(["p foo", "c"])
foo = "outer"
def f(x):
foo = "inner"
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
del foo
return x
del foo
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) 'inner'
\(jdb\) """)
f(2.)
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_debugger_accesses_globals(self):
stdin, stdout = make_fake_stdin_stdout(["p foo", "c"])
@jax.jit
def g():
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) \*\*\* NameError: name 'foo' is not defined
\(jdb\) """)
g()
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_can_limit_num_frames(self):
stdin, stdout = make_fake_stdin_stdout(["u", "p x", "c"])
def g():
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli",
num_frames=2)
@jax.jit
def f():
x = 2
g()
return x
_ = f()
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) .*
.*
.*
.*
.*
.*
.*
\(jdb\) 2
\(jdb\) """)
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
stdin, stdout = make_fake_stdin_stdout(["u", "u", "c"])
def g2():
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli",
num_frames=2)
@jax.jit
def f2():
x = 2
g2()
return x
expected = ".*At topmost frame.*"
_ = f2()
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
def test_can_handle_dictionaries_with_unsortable_keys(self):
stdin, stdout = make_fake_stdin_stdout(["p x", "p weird_dict",
"p weirder_dict", "c"])
@jax.jit
def f():
weird_dict = {(lambda x: x): 2., (lambda x: x * 2): 3}
weirder_dict = {(lambda x: x): weird_dict}
x = 2.
debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
del weirder_dict
return x
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) 2.0
\(jdb\) <cant_flatten>
\(jdb\) <cant_flatten>
\(jdb\) """)
_ = f()
jax.effects_barrier()
self.assertRegex(stdout.getvalue(), expected)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| CliDebuggerTest |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 10429,
"end": 10508
} | class ____(ArrayRHSMixin, Exact):
pass
@ArrayField.register_lookup
| ArrayExact |
python | Pylons__pyramid | src/pyramid/util.py | {
"start": 21696,
"end": 22603
} | class ____:
def loads(self, bstruct):
return text_(bstruct)
def dumps(self, appstruct):
return bytes_(appstruct)
def is_bound_method(ob):
return inspect.ismethod(ob) and getattr(ob, '__self__', None) is not None
def is_unbound_method(fn):
"""
This consistently verifies that the callable is bound to a
class.
"""
is_bound = is_bound_method(fn)
if not is_bound and inspect.isroutine(fn):
spec = inspect.getfullargspec(fn)
has_self = len(spec.args) > 0 and spec.args[0] == 'self'
if inspect.isfunction(fn) and has_self:
return True
return False
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
| SimpleSerializer |
python | wandb__wandb | wandb/automations/automations.py | {
"start": 1553,
"end": 2712
} | class ____(GQLInput, extra="forbid", validate_default=False):
"""A new automation to be created."""
name: Optional[str] = None
"""The name of this automation."""
description: Optional[str] = None
"""An optional description of this automation."""
enabled: Optional[bool] = None
"""Whether this automation is enabled. Only enabled automations will trigger."""
event: Optional[InputEvent] = None
"""The event that will trigger this automation."""
# Ensure that the event and its scope are always consistent, if the event is set.
@property
def scope(self) -> Optional[AutomationScope]:
"""The scope in which the triggering event must occur."""
return self.event.scope if self.event else None
@scope.setter
def scope(self, value: AutomationScope) -> None:
if self.event is None:
raise ValueError("Cannot set `scope` for an automation with no `event`")
self.event.scope = value
action: Optional[InputAction] = None
"""The action that will execute when this automation is triggered."""
__all__ = [
"Automation",
"NewAutomation",
]
| NewAutomation |
python | pyparsing__pyparsing | examples/pythonGrammarParser.py | {
"start": 6007,
"end": 8047
} | class ____(SemanticGroup):
def __init__(self, contents):
if len(contents) > 1:
self.rep = contents[1]
else:
self.rep = ""
if isinstance(contents, str):
self.contents = contents
else:
self.contents = contents[0]
def __str__(self):
return "{}{}".format(self.rep, self.contents)
def makeGroupObject(cls):
def groupAction(s, l, t):
try:
return cls(t[0].as_list())
except Exception:
return cls(t)
return groupAction
# bnf punctuation
LPAREN = Suppress("(")
RPAREN = Suppress(")")
LBRACK = Suppress("[")
RBRACK = Suppress("]")
COLON = Suppress(":")
ALT_OP = Suppress("|")
# bnf grammar
ident = Word(alphanums + "_")
bnfToken = Word(alphanums + "_") + ~FollowedBy(":")
repSymbol = one_of("* +")
bnfExpr = Forward()
optionalTerm = Group(LBRACK + bnfExpr + RBRACK).set_parse_action(
makeGroupObject(OptionalGroup)
)
bnfTerm = (
(bnfToken | quoted_string | optionalTerm | (LPAREN + bnfExpr + RPAREN))
+ Optional(repSymbol)
).set_parse_action(makeGroupObject(Atom))
andList = Group(bnfTerm + OneOrMore(bnfTerm)).set_parse_action(makeGroupObject(AndList))
bnfFactor = andList | bnfTerm
orList = Group(bnfFactor + OneOrMore(ALT_OP + bnfFactor)).set_parse_action(
makeGroupObject(OrList)
)
bnfExpr << (orList | bnfFactor)
bnfLine = ident + COLON + bnfExpr
bnfComment = "#" + rest_of_line
# build return tokens as a dictionary
bnf = Dict(OneOrMore(Group(bnfLine)))
bnf.ignore(bnfComment)
# bnf is defined, parse the grammar text
bnfDefs = bnf.parse_string(grammar)
# correct answer is 78
expected = 78
assert len(bnfDefs) == expected, "Error, found %d BNF defns, expected %d" % (
len(bnfDefs),
expected,
)
# list out defns in order they were parsed (to verify accuracy of parsing)
for k, v in bnfDefs:
print(k, "=", v)
print()
# list out parsed grammar defns (demonstrates dictionary access to parsed tokens)
for k in list(bnfDefs.keys()):
print(k, "=", bnfDefs[k])
| Atom |
python | pytorch__pytorch | torch/_inductor/virtualized.py | {
"start": 6350,
"end": 8910
} | class ____(NullHandler):
"""
We need access `V.kernel.removed_buffers` in DeferredLine class when there
is no kernel in the context. This happens when codegening the wrapper.
Initialize `removed_buffers` and `inplaced_to_remove` explicitly so we don't
need call 'getattr' with default value which is error prone to typo in
attribute name.
"""
def __init__(self):
super().__init__()
self.removed_buffers = OrderedSet[Any]()
self.inplaced_to_remove = OrderedSet[Any]()
self.index_dtype = "tl.int64"
def get_index_dtype_as_torch_dtype(self):
import torch
if self.index_dtype == "tl.int64":
return torch.int64
elif self.index_dtype == "tl.int32":
return torch.int32
else:
raise ValueError(f"Unknown dtype: {self.index_dtype}")
_ops: Virtualized[OpsHandler[Any]] = Virtualized(
"ops", cast(type[OpsHandler[Any]], MockHandler)
)
_graph: Virtualized[GraphLowering] = Virtualized("graph", NullHandler)
_extern_kernel_nodes: Virtualized[list[ExternKernelNode]] = Virtualized(
"extern_kernel_nodes", NullHandler
)
_real_inputs: Virtualized[list[torch.Tensor]] = Virtualized("real_inputs", NullHandler)
_fake_mode: Virtualized[FakeTensorMode] = Virtualized("fake_mode", NullHandler)
_kernel: Virtualized[NullKernelHandler] = Virtualized(
"kernel", NullKernelHandler
) # TODO: improve type
_debug: Virtualized[DebugContext] = Virtualized("debug", NullHandler)
_interpreter: Virtualized[InterpreterShim] = Virtualized("interpreter", NullHandler)
_aot_compilation: Virtualized[bool] = Virtualized("aot_compilation", NullHandler)
_current_node: Virtualized[torch.fx.Node] = Virtualized("current_node", NullHandler)
_local_buffer_context: Virtualized[LocalBufferContext] = Virtualized(
"local_buffer_context", NullHandler
)
_distributed_autotune_state: Virtualized[_DistributedAutotuneState] = Virtualized(
"distributed_autotune_state", NullHandler
)
def _choices_default():
"""
Lazy init the global choices handler
We virtualize InductorChoices to allow changing inductor heuristics from out of tree.
"""
from torch._inductor import config
from torch._inductor.choices import InductorChoices
if config.inductor_choices_class is not None:
rv = config.inductor_choices_class()
else:
rv = InductorChoices()
setattr(threadlocal, _choices._key, rv)
return rv
_choices: Virtualized[InductorChoices] = Virtualized("choices", _choices_default)
| NullKernelHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-databend/destination_databend/client.py | {
"start": 106,
"end": 852
} | class ____:
def __init__(self, host: str, port: int, database: str, table: str, username: str, ssl: bool, password: str = None):
self.host = host
self.port = port
self.database = database
self.table = table
self.username = username
self.password = password
self.ssl = ssl or False
def open(self):
if self.ssl:
handle = connector.connect(f"databend://{self.username}:{self.password}@{self.host}:{self.port}/{self.database}").cursor()
else:
handle = connector.connect(
f"databend://{self.username}:{self.password}@{self.host}:{self.port}/{self.database}?sslmode=disable"
).cursor()
return handle
| DatabendClient |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/hybrid.py | {
"start": 44128,
"end": 44940
} | class ____(QueryableAttribute[_T]):
"""Describe the object returned by a hybrid_property() when
called as a class-level descriptor.
"""
if TYPE_CHECKING:
def getter(
self, fget: _HybridGetterType[_T]
) -> hybrid_property[_T]: ...
def setter(
self, fset: _HybridSetterType[_T]
) -> hybrid_property[_T]: ...
def deleter(
self, fdel: _HybridDeleterType[_T]
) -> hybrid_property[_T]: ...
@property
def overrides(self) -> hybrid_property[_T]: ...
def update_expression(
self, meth: _HybridUpdaterType[_T]
) -> hybrid_property[_T]: ...
def bulk_dml(
self, meth: _HybridBulkDMLType[_T]
) -> hybrid_property[_T]: ...
| _HybridClassLevelAccessor |
python | wandb__wandb | wandb/sdk/artifacts/artifact_manifests/artifact_manifest_v1.py | {
"start": 635,
"end": 2744
} | class ____(ArtifactManifest):
manifest_version: Annotated[Literal[1], Field(repr=False)] = 1
entries: Dict[str, ArtifactManifestEntry] = Field(default_factory=dict)
storage_policy: StoragePolicy = Field(
default_factory=make_storage_policy, exclude=True, repr=False
)
@classmethod
def from_manifest_json(cls, manifest_json: dict[str, Any]) -> ArtifactManifestV1:
data = ArtifactManifestV1Data(**manifest_json)
policy_name = data.storage_policy
policy_cfg = data.storage_policy_config
policy = StoragePolicy.lookup_by_name(policy_name).from_config(policy_cfg)
return cls(
manifest_version=data.version, entries=data.contents, storage_policy=policy
)
def to_manifest_json(self) -> dict:
"""This is the JSON that's stored in wandb_manifest.json.
If include_local is True we also include the local paths to files. This is
used to represent an artifact that's waiting to be saved on the current
system. We don't need to include the local paths in the artifact manifest
contents.
"""
omit_entry_fields = {"path", "local_path", "skip_cache"}
return {
"version": self.manifest_version,
"storagePolicy": self.storage_policy.name(),
"storagePolicyConfig": self.storage_policy.config(),
"contents": {
path: entry.model_dump(exclude=omit_entry_fields, exclude_defaults=True)
for path, entry in self.entries.items()
},
}
_DIGEST_HEADER: ClassVar[bytes] = b"wandb-artifact-manifest-v1\n"
"""Encoded prefix/header for the ArtifactManifest digest."""
def digest(self) -> HexMD5:
hasher = _md5(self._DIGEST_HEADER)
# sort by key (path)
for path, entry in sorted(self.entries.items(), key=itemgetter(0)):
hasher.update(f"{path}:{entry.digest}\n".encode())
return hasher.hexdigest()
def size(self) -> int:
return sum(entry.size for entry in self.entries.values() if entry.size)
| ArtifactManifestV1 |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/variant_values/package.py | {
"start": 216,
"end": 888
} | class ____(Package):
"""Test variant value validation with multiple definitions."""
homepage = "https://www.example.org"
url = "https://example.org/files/v3.4/cmake-3.4.3.tar.gz"
version("1.0", md5="4cb3ff35b2472aae70f542116d616e63")
version("2.0", md5="b2472aae70f542116d616e634cb3ff35")
version("3.0", md5="d616e634cb3ff35b2472aae70f542116")
variant("v", default="foo", values=["foo"], multi=False, when="@1.0")
variant("v", default="foo", values=["foo", "bar"], multi=False, when="@2.0")
# this overrides the prior definition entirely
variant("v", default="bar", values=["foo", "bar"], multi=True, when="@2.0:3.0")
| VariantValues |
python | squidfunk__mkdocs-material | material/plugins/blog/author.py | {
"start": 1672,
"end": 1754
} | class ____(Config):
authors = DictOfItems(SubConfig(Author), default = {})
| Authors |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_matmul_op_test.py | {
"start": 1357,
"end": 4688
} | class ____(test.TestCase):
def _testCpuMatmul(self,
x,
y,
tr_a=False,
tr_b=False,
sp_a=True,
sp_b=False,
x_dtype=dtypes.float32,
y_dtype=dtypes.float32):
with self.cached_session(use_gpu=False):
tf_x = math_ops.cast(x, x_dtype)
tf_y = math_ops.cast(y, y_dtype)
tf_ans = math_ops.matmul(
tf_x,
tf_y,
transpose_a=tr_a,
transpose_b=tr_b,
a_is_sparse=sp_a,
b_is_sparse=sp_b)
out = self.evaluate(tf_ans)
np_x = math_ops.cast(tf_x, dtypes.float32).eval()
np_y = math_ops.cast(tf_y, dtypes.float32).eval()
if tr_a:
np_x = np.transpose(np_x)
if tr_b:
np_y = np.transpose(np_y)
np_ans = np.matrix(np_x) * np.matrix(np_y)
self.assertShapeEqual(np_ans, tf_ans)
self.assertAllCloseAccordingToType(np_ans, out, rtol=1e-4, atol=1e-4)
@test_util.run_deprecated_v1
def testBasic(self):
x = np.arange(0., 4.).reshape([4, 1]).astype(np.float32)
y = np.arange(-1., 1.).reshape([1, 2]).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
@test_util.run_deprecated_v1
def testZeroDim(self):
x = np.ones((4, 0)).astype(np.float32)
y = np.ones((0, 3)).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
@test_util.run_deprecated_v1
def testEmpty(self):
x = np.ones((0, 0)).astype(np.float32)
y = np.ones((0, 0)).astype(np.float32)
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
# Tests setting one dimension to be a high value.
@test_util.run_deprecated_v1
def testLarge(self):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
for m, k, n in [(r1, r2, r3), (r2, r1, r3), (r2, r3, r1)]:
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
x = RandMatrix(m, k, False)
y = RandMatrix(k, n, False)
self._testCpuMatmul(x, y, x_dtype=x_dtype, y_dtype=y_dtype)
# Tests random sized matrices.
@test_util.run_deprecated_v1
def testRandom(self):
for tr_a in [True, False]:
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
for x_dtype in (dtypes.float32, dtypes.bfloat16):
for y_dtype in (dtypes.float32, dtypes.bfloat16):
n, k, m = np.random.randint(1, 100, size=3)
x = RandMatrix(n, k, tr_a)
y = RandMatrix(k, m, tr_b)
self._testCpuMatmul(
x,
y,
tr_a,
tr_b,
sp_a,
sp_b,
x_dtype=x_dtype,
y_dtype=y_dtype)
| SparseMatMulTest |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/style.py | {
"start": 846,
"end": 26205
} | class ____:
"""A terminal style.
A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
as bold, italic etc. The attributes have 3 states: they can either be on
(``True``), off (``False``), or not set (``None``).
Args:
color (Union[Color, str], optional): Color of terminal text. Defaults to None.
bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
bold (bool, optional): Enable bold text. Defaults to None.
dim (bool, optional): Enable dim text. Defaults to None.
italic (bool, optional): Enable italic text. Defaults to None.
underline (bool, optional): Enable underlined text. Defaults to None.
blink (bool, optional): Enabled blinking text. Defaults to None.
blink2 (bool, optional): Enable fast blinking text. Defaults to None.
reverse (bool, optional): Enabled reverse text. Defaults to None.
conceal (bool, optional): Enable concealed text. Defaults to None.
strike (bool, optional): Enable strikethrough text. Defaults to None.
underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
frame (bool, optional): Enable framed text. Defaults to None.
encircle (bool, optional): Enable encircled text. Defaults to None.
overline (bool, optional): Enable overlined text. Defaults to None.
link (str, link): Link URL. Defaults to None.
"""
_color: Optional[Color]
_bgcolor: Optional[Color]
_attributes: int
_set_attributes: int
_hash: Optional[int]
_null: bool
_meta: Optional[bytes]
__slots__ = [
"_color",
"_bgcolor",
"_attributes",
"_set_attributes",
"_link",
"_link_id",
"_ansi",
"_style_definition",
"_hash",
"_null",
"_meta",
]
# maps bits on to SGR parameter
_style_map = {
0: "1",
1: "2",
2: "3",
3: "4",
4: "5",
5: "6",
6: "7",
7: "8",
8: "9",
9: "21",
10: "51",
11: "52",
12: "53",
}
STYLE_ATTRIBUTES = {
"dim": "dim",
"d": "dim",
"bold": "bold",
"b": "bold",
"italic": "italic",
"i": "italic",
"underline": "underline",
"u": "underline",
"blink": "blink",
"blink2": "blink2",
"reverse": "reverse",
"r": "reverse",
"conceal": "conceal",
"c": "conceal",
"strike": "strike",
"s": "strike",
"underline2": "underline2",
"uu": "underline2",
"frame": "frame",
"encircle": "encircle",
"overline": "overline",
"o": "overline",
}
def __init__(
self,
*,
color: Optional[Union[Color, str]] = None,
bgcolor: Optional[Union[Color, str]] = None,
bold: Optional[bool] = None,
dim: Optional[bool] = None,
italic: Optional[bool] = None,
underline: Optional[bool] = None,
blink: Optional[bool] = None,
blink2: Optional[bool] = None,
reverse: Optional[bool] = None,
conceal: Optional[bool] = None,
strike: Optional[bool] = None,
underline2: Optional[bool] = None,
frame: Optional[bool] = None,
encircle: Optional[bool] = None,
overline: Optional[bool] = None,
link: Optional[str] = None,
meta: Optional[Dict[str, Any]] = None,
):
self._ansi: Optional[str] = None
self._style_definition: Optional[str] = None
def _make_color(color: Union[Color, str]) -> Color:
return color if isinstance(color, Color) else Color.parse(color)
self._color = None if color is None else _make_color(color)
self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
self._set_attributes = sum(
(
bold is not None,
dim is not None and 2,
italic is not None and 4,
underline is not None and 8,
blink is not None and 16,
blink2 is not None and 32,
reverse is not None and 64,
conceal is not None and 128,
strike is not None and 256,
underline2 is not None and 512,
frame is not None and 1024,
encircle is not None and 2048,
overline is not None and 4096,
)
)
self._attributes = (
sum(
(
bold and 1 or 0,
dim and 2 or 0,
italic and 4 or 0,
underline and 8 or 0,
blink and 16 or 0,
blink2 and 32 or 0,
reverse and 64 or 0,
conceal and 128 or 0,
strike and 256 or 0,
underline2 and 512 or 0,
frame and 1024 or 0,
encircle and 2048 or 0,
overline and 4096 or 0,
)
)
if self._set_attributes
else 0
)
self._link = link
self._meta = None if meta is None else dumps(meta)
self._link_id = (
f"{randint(0, 999999)}{hash(self._meta)}" if (link or meta) else ""
)
self._hash: Optional[int] = None
self._null = not (self._set_attributes or color or bgcolor or link or meta)
@classmethod
def null(cls) -> "Style":
"""Create an 'null' style, equivalent to Style(), but more performant."""
return NULL_STYLE
@classmethod
def from_color(
cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
) -> "Style":
"""Create a new style with colors and no attributes.
Returns:
color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
"""
style: Style = cls.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = color
style._bgcolor = bgcolor
style._set_attributes = 0
style._attributes = 0
style._link = None
style._link_id = ""
style._meta = None
style._null = not (color or bgcolor)
style._hash = None
return style
@classmethod
def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
"""Create a new style with meta data.
Returns:
meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
"""
style: Style = cls.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = None
style._bgcolor = None
style._set_attributes = 0
style._attributes = 0
style._link = None
style._meta = dumps(meta)
style._link_id = f"{randint(0, 999999)}{hash(style._meta)}"
style._hash = None
style._null = not (meta)
return style
@classmethod
def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
"""Create a blank style with meta information.
Example:
style = Style.on(click=self.on_click)
Args:
meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
**handlers (Any): Keyword arguments are translated in to handlers.
Returns:
Style: A Style with meta information attached.
"""
meta = {} if meta is None else meta
meta.update({f"@{key}": value for key, value in handlers.items()})
return cls.from_meta(meta)
bold = _Bit(0)
dim = _Bit(1)
italic = _Bit(2)
underline = _Bit(3)
blink = _Bit(4)
blink2 = _Bit(5)
reverse = _Bit(6)
conceal = _Bit(7)
strike = _Bit(8)
underline2 = _Bit(9)
frame = _Bit(10)
encircle = _Bit(11)
overline = _Bit(12)
@property
def link_id(self) -> str:
"""Get a link id, used in ansi code for links."""
return self._link_id
def __str__(self) -> str:
"""Re-generate style definition from attributes."""
if self._style_definition is None:
attributes: List[str] = []
append = attributes.append
bits = self._set_attributes
if bits & 0b0000000001111:
if bits & 1:
append("bold" if self.bold else "not bold")
if bits & (1 << 1):
append("dim" if self.dim else "not dim")
if bits & (1 << 2):
append("italic" if self.italic else "not italic")
if bits & (1 << 3):
append("underline" if self.underline else "not underline")
if bits & 0b0000111110000:
if bits & (1 << 4):
append("blink" if self.blink else "not blink")
if bits & (1 << 5):
append("blink2" if self.blink2 else "not blink2")
if bits & (1 << 6):
append("reverse" if self.reverse else "not reverse")
if bits & (1 << 7):
append("conceal" if self.conceal else "not conceal")
if bits & (1 << 8):
append("strike" if self.strike else "not strike")
if bits & 0b1111000000000:
if bits & (1 << 9):
append("underline2" if self.underline2 else "not underline2")
if bits & (1 << 10):
append("frame" if self.frame else "not frame")
if bits & (1 << 11):
append("encircle" if self.encircle else "not encircle")
if bits & (1 << 12):
append("overline" if self.overline else "not overline")
if self._color is not None:
append(self._color.name)
if self._bgcolor is not None:
append("on")
append(self._bgcolor.name)
if self._link:
append("link")
append(self._link)
self._style_definition = " ".join(attributes) or "none"
return self._style_definition
def __bool__(self) -> bool:
"""A Style is false if it has no attributes, colors, or links."""
return not self._null
def _make_ansi_codes(self, color_system: ColorSystem) -> str:
"""Generate ANSI codes for this style.
Args:
color_system (ColorSystem): Color system.
Returns:
str: String containing codes.
"""
if self._ansi is None:
sgr: List[str] = []
append = sgr.append
_style_map = self._style_map
attributes = self._attributes & self._set_attributes
if attributes:
if attributes & 1:
append(_style_map[0])
if attributes & 2:
append(_style_map[1])
if attributes & 4:
append(_style_map[2])
if attributes & 8:
append(_style_map[3])
if attributes & 0b0000111110000:
for bit in range(4, 9):
if attributes & (1 << bit):
append(_style_map[bit])
if attributes & 0b1111000000000:
for bit in range(9, 13):
if attributes & (1 << bit):
append(_style_map[bit])
if self._color is not None:
sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
if self._bgcolor is not None:
sgr.extend(
self._bgcolor.downgrade(color_system).get_ansi_codes(
foreground=False
)
)
self._ansi = ";".join(sgr)
return self._ansi
@classmethod
@lru_cache(maxsize=1024)
def normalize(cls, style: str) -> str:
"""Normalize a style definition so that styles with the same effect have the same string
representation.
Args:
style (str): A style definition.
Returns:
str: Normal form of style definition.
"""
try:
return str(cls.parse(style))
except errors.StyleSyntaxError:
return style.strip().lower()
@classmethod
def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
"""Pick first non-None style."""
for value in values:
if value is not None:
return value
raise ValueError("expected at least one non-None style")
def __rich_repr__(self) -> Result:
yield "color", self.color, None
yield "bgcolor", self.bgcolor, None
yield "bold", self.bold, None,
yield "dim", self.dim, None,
yield "italic", self.italic, None
yield "underline", self.underline, None,
yield "blink", self.blink, None
yield "blink2", self.blink2, None
yield "reverse", self.reverse, None
yield "conceal", self.conceal, None
yield "strike", self.strike, None
yield "underline2", self.underline2, None
yield "frame", self.frame, None
yield "encircle", self.encircle, None
yield "link", self.link, None
if self._meta:
yield "meta", self.meta
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return self.__hash__() == other.__hash__()
def __ne__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return self.__hash__() != other.__hash__()
def __hash__(self) -> int:
if self._hash is not None:
return self._hash
self._hash = hash(
(
self._color,
self._bgcolor,
self._attributes,
self._set_attributes,
self._link,
self._meta,
)
)
return self._hash
@property
def color(self) -> Optional[Color]:
"""The foreground color or None if it is not set."""
return self._color
@property
def bgcolor(self) -> Optional[Color]:
"""The background color or None if it is not set."""
return self._bgcolor
@property
def link(self) -> Optional[str]:
"""Link text, if set."""
return self._link
@property
def transparent_background(self) -> bool:
"""Check if the style specified a transparent background."""
return self.bgcolor is None or self.bgcolor.is_default
@property
def background_style(self) -> "Style":
"""A Style with background only."""
return Style(bgcolor=self.bgcolor)
@property
def meta(self) -> Dict[str, Any]:
"""Get meta information (can not be changed after construction)."""
return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
@property
def without_color(self) -> "Style":
"""Get a copy of the style with color removed."""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = None
style._style_definition = None
style._color = None
style._bgcolor = None
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = self._link
style._link_id = f"{randint(0, 999999)}" if self._link else ""
style._null = False
style._meta = None
style._hash = None
return style
@classmethod
@lru_cache(maxsize=4096)
def parse(cls, style_definition: str) -> "Style":
"""Parse a style definition.
Args:
style_definition (str): A string containing a style.
Raises:
errors.StyleSyntaxError: If the style definition syntax is invalid.
Returns:
`Style`: A Style instance.
"""
if style_definition.strip() == "none" or not style_definition:
return cls.null()
STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
color: Optional[str] = None
bgcolor: Optional[str] = None
attributes: Dict[str, Optional[Any]] = {}
link: Optional[str] = None
words = iter(style_definition.split())
for original_word in words:
word = original_word.lower()
if word == "on":
word = next(words, "")
if not word:
raise errors.StyleSyntaxError("color expected after 'on'")
try:
Color.parse(word)
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word!r} as background color; {error}"
) from None
bgcolor = word
elif word == "not":
word = next(words, "")
attribute = STYLE_ATTRIBUTES.get(word)
if attribute is None:
raise errors.StyleSyntaxError(
f"expected style attribute after 'not', found {word!r}"
)
attributes[attribute] = False
elif word == "link":
word = next(words, "")
if not word:
raise errors.StyleSyntaxError("URL expected after 'link'")
link = word
elif word in STYLE_ATTRIBUTES:
attributes[STYLE_ATTRIBUTES[word]] = True
else:
try:
Color.parse(word)
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word!r} as color; {error}"
) from None
color = word
style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
return style
@lru_cache(maxsize=1024)
def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
"""Get a CSS style rule."""
theme = theme or DEFAULT_TERMINAL_THEME
css: List[str] = []
append = css.append
color = self.color
bgcolor = self.bgcolor
if self.reverse:
color, bgcolor = bgcolor, color
if self.dim:
foreground_color = (
theme.foreground_color if color is None else color.get_truecolor(theme)
)
color = Color.from_triplet(
blend_rgb(foreground_color, theme.background_color, 0.5)
)
if color is not None:
theme_color = color.get_truecolor(theme)
append(f"color: {theme_color.hex}")
append(f"text-decoration-color: {theme_color.hex}")
if bgcolor is not None:
theme_color = bgcolor.get_truecolor(theme, foreground=False)
append(f"background-color: {theme_color.hex}")
if self.bold:
append("font-weight: bold")
if self.italic:
append("font-style: italic")
if self.underline:
append("text-decoration: underline")
if self.strike:
append("text-decoration: line-through")
if self.overline:
append("text-decoration: overline")
return "; ".join(css)
@classmethod
def combine(cls, styles: Iterable["Style"]) -> "Style":
"""Combine styles and get result.
Args:
styles (Iterable[Style]): Styles to combine.
Returns:
Style: A new style instance.
"""
iter_styles = iter(styles)
return sum(iter_styles, next(iter_styles))
@classmethod
def chain(cls, *styles: "Style") -> "Style":
"""Combine styles from positional argument in to a single style.
Args:
*styles (Iterable[Style]): Styles to combine.
Returns:
Style: A new style instance.
"""
iter_styles = iter(styles)
return sum(iter_styles, next(iter_styles))
def copy(self) -> "Style":
"""Get a copy of this style.
Returns:
Style: A new Style instance with identical attributes.
"""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = self._link
style._link_id = f"{randint(0, 999999)}" if self._link else ""
style._hash = self._hash
style._null = False
style._meta = self._meta
return style
@lru_cache(maxsize=128)
def clear_meta_and_links(self) -> "Style":
"""Get a copy of this style with link and meta information removed.
Returns:
Style: New style object.
"""
if self._null:
return NULL_STYLE
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = None
style._link_id = ""
style._hash = None
style._null = False
style._meta = None
return style
def update_link(self, link: Optional[str] = None) -> "Style":
"""Get a copy with a different value for link.
Args:
link (str, optional): New value for link. Defaults to None.
Returns:
Style: A new Style instance.
"""
style: Style = self.__new__(Style)
style._ansi = self._ansi
style._style_definition = self._style_definition
style._color = self._color
style._bgcolor = self._bgcolor
style._attributes = self._attributes
style._set_attributes = self._set_attributes
style._link = link
style._link_id = f"{randint(0, 999999)}" if link else ""
style._hash = None
style._null = False
style._meta = self._meta
return style
def render(
self,
text: str = "",
*,
color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
legacy_windows: bool = False,
) -> str:
"""Render the ANSI codes for the style.
Args:
text (str, optional): A string to style. Defaults to "".
color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
Returns:
str: A string containing ANSI style codes.
"""
if not text or color_system is None:
return text
attrs = self._ansi or self._make_ansi_codes(color_system)
rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
if self._link and not legacy_windows:
rendered = (
f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
)
return rendered
def test(self, text: Optional[str] = None) -> None:
"""Write text with style directly to terminal.
This method is for testing purposes only.
Args:
text (Optional[str], optional): Text to style or None for style name.
"""
text = text or str(self)
sys.stdout.write(f"{self.render(text)}\n")
@lru_cache(maxsize=1024)
def _add(self, style: Optional["Style"]) -> "Style":
if style is None or style._null:
return self
if self._null:
return style
new_style: Style = self.__new__(Style)
new_style._ansi = None
new_style._style_definition = None
new_style._color = style._color or self._color
new_style._bgcolor = style._bgcolor or self._bgcolor
new_style._attributes = (self._attributes & ~style._set_attributes) | (
style._attributes & style._set_attributes
)
new_style._set_attributes = self._set_attributes | style._set_attributes
new_style._link = style._link or self._link
new_style._link_id = style._link_id or self._link_id
new_style._null = style._null
if self._meta and style._meta:
new_style._meta = dumps({**self.meta, **style.meta})
else:
new_style._meta = self._meta or style._meta
new_style._hash = None
return new_style
def __add__(self, style: Optional["Style"]) -> "Style":
combined_style = self._add(style)
return combined_style.copy() if combined_style.link else combined_style
NULL_STYLE = Style()
| Style |
python | ray-project__ray | rllib/algorithms/iql/iql.py | {
"start": 8230,
"end": 8458
} | class ____(MARWIL):
"""Implicit Q-learning (derived from MARWIL).
Uses MARWIL training step.
"""
@classmethod
@override(MARWIL)
def get_default_config(cls) -> AlgorithmConfig:
return IQLConfig()
| IQL |
python | wandb__wandb | wandb/integration/catboost/catboost.py | {
"start": 256,
"end": 5986
} | class ____:
"""`WandbCallback` automatically integrates CatBoost with wandb.
Args:
- metric_period: (int) if you are passing `metric_period` to your CatBoost model please pass the same value here (default=1).
Passing `WandbCallback` to CatBoost will:
- log training and validation metrics at every `metric_period`
- log iteration at every `metric_period`
Example:
```
train_pool = Pool(
train[features], label=train["label"], cat_features=cat_features
)
test_pool = Pool(test[features], label=test["label"], cat_features=cat_features)
model = CatBoostRegressor(
iterations=100,
loss_function="Cox",
eval_metric="Cox",
)
model.fit(
train_pool,
eval_set=test_pool,
callbacks=[WandbCallback()],
)
```
"""
def __init__(self, metric_period: int = 1):
if wandb.run is None:
raise wandb.Error("You must call `wandb.init()` before `WandbCallback()`")
with wb_telemetry.context() as tel:
tel.feature.catboost_wandb_callback = True
self.metric_period: int = metric_period
def after_iteration(self, info: SimpleNamespace) -> bool:
if info.iteration % self.metric_period == 0:
for data, metric in info.metrics.items():
for metric_name, log in metric.items():
# todo: replace with wandb.run._log once available
wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False)
# todo: replace with wandb.run._log once available
wandb.log({f"iteration@metric-period-{self.metric_period}": info.iteration})
return True
def _checkpoint_artifact(
model: Union[CatBoostClassifier, CatBoostRegressor], aliases: List[str]
) -> None:
"""Upload model checkpoint as W&B artifact."""
if wandb.run is None:
raise wandb.Error(
"You must call `wandb.init()` before `_checkpoint_artifact()`"
)
model_name = f"model_{wandb.run.id}"
# save the model in the default `cbm` format
model_path = Path(wandb.run.dir) / "model"
model.save_model(model_path)
model_artifact = wandb.Artifact(name=model_name, type="model")
model_artifact.add_file(str(model_path))
wandb.log_artifact(model_artifact, aliases=aliases)
def _log_feature_importance(
model: Union[CatBoostClassifier, CatBoostRegressor],
) -> None:
"""Log feature importance with default settings."""
if wandb.run is None:
raise wandb.Error(
"You must call `wandb.init()` before `_checkpoint_artifact()`"
)
feat_df = model.get_feature_importance(prettified=True)
fi_data = [
[feat, feat_imp]
for feat, feat_imp in zip(feat_df["Feature Id"], feat_df["Importances"])
]
table = wandb.Table(data=fi_data, columns=["Feature", "Importance"])
# todo: replace with wandb.run._log once available
wandb.log(
{
"Feature Importance": wandb.plot.bar(
table, "Feature", "Importance", title="Feature Importance"
)
},
commit=False,
)
def log_summary(
model: Union[CatBoostClassifier, CatBoostRegressor],
log_all_params: bool = True,
save_model_checkpoint: bool = False,
log_feature_importance: bool = True,
) -> None:
"""`log_summary` logs useful metrics about catboost model after training is done.
Args:
model: it can be CatBoostClassifier or CatBoostRegressor.
log_all_params: (boolean) if True (default) log the model hyperparameters as W&B config.
save_model_checkpoint: (boolean) if True saves the model upload as W&B artifacts.
log_feature_importance: (boolean) if True (default) logs feature importance as W&B bar chart using the default setting of `get_feature_importance`.
Using this along with `wandb_callback` will:
- save the hyperparameters as W&B config,
- log `best_iteration` and `best_score` as `wandb.summary`,
- save and upload your trained model to Weights & Biases Artifacts (when `save_model_checkpoint = True`)
- log feature importance plot.
Example:
```python
train_pool = Pool(
train[features], label=train["label"], cat_features=cat_features
)
test_pool = Pool(test[features], label=test["label"], cat_features=cat_features)
model = CatBoostRegressor(
iterations=100,
loss_function="Cox",
eval_metric="Cox",
)
model.fit(
train_pool,
eval_set=test_pool,
callbacks=[WandbCallback()],
)
log_summary(model)
```
"""
if wandb.run is None:
raise wandb.Error("You must call `wandb.init()` before `log_summary()`")
if not (isinstance(model, (CatBoostClassifier, CatBoostRegressor))):
raise wandb.Error(
"Model should be an instance of CatBoostClassifier or CatBoostRegressor"
)
with wb_telemetry.context() as tel:
tel.feature.catboost_log_summary = True
# log configs
params = model.get_all_params()
if log_all_params:
wandb.config.update(params)
# log best score and iteration
wandb.run.summary["best_iteration"] = model.get_best_iteration()
wandb.run.summary["best_score"] = model.get_best_score()
# log model
if save_model_checkpoint:
aliases = ["best"] if params["use_best_model"] else ["last"]
_checkpoint_artifact(model, aliases=aliases)
# Feature importance
if log_feature_importance:
_log_feature_importance(model)
| WandbCallback |
python | google__flatbuffers | python/flatbuffers/compat.py | {
"start": 2133,
"end": 2373
} | class ____(RuntimeError):
"""Error raised when user tries to use a feature that
requires numpy without having numpy installed.
"""
pass
# NOTE: Future Jython support may require code here (look at `six`).
| NumpyRequiredForThisFeature |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_types.py | {
"start": 40709,
"end": 47272
} | class ____(fixtures.TablesTest):
__only_on__ = "oracle"
__backend__ = True
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"z_test",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Text),
Column("bindata", LargeBinary),
)
Table(
"binary_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", LargeBinary),
)
@classmethod
def insert_data(cls, connection):
cls.data = data = [
dict(
id=i,
data="this is text %d" % i,
bindata=b("this is binary %d" % i),
)
for i in range(1, 20)
]
connection.execute(cls.tables.z_test.insert(), data)
binary_table = cls.tables.binary_table
fname = os.path.join(
os.path.dirname(__file__), "..", "..", "binary_data_one.dat"
)
with open(fname, "rb") as file_:
cls.stream = stream = file_.read(12000)
for i in range(1, 11):
connection.execute(binary_table.insert(), dict(id=i, data=stream))
def _read_lob(self, engine, row):
if engine.dialect.is_async:
data = await_(row._mapping["data"].read())
bindata = await_(row._mapping["bindata"].read())
else:
data = row._mapping["data"].read()
bindata = row._mapping["bindata"].read()
return data, bindata
def test_lobs_without_convert(self):
engine = testing_engine(options=dict(auto_convert_lobs=False))
t = self.tables.z_test
with engine.begin() as conn:
row = conn.execute(t.select().where(t.c.id == 1)).first()
data, bindata = self._read_lob(engine, row)
eq_(data, "this is text 1")
eq_(bindata, b("this is binary 1"))
def test_lobs_with_convert(self, connection):
t = self.tables.z_test
row = connection.execute(t.select().where(t.c.id == 1)).first()
eq_(row._mapping["data"], "this is text 1")
eq_(row._mapping["bindata"], b("this is binary 1"))
def test_lobs_with_convert_raw(self, connection):
row = exec_sql(connection, "select data, bindata from z_test").first()
eq_(row._mapping["data"], "this is text 1")
eq_(row._mapping["bindata"], b("this is binary 1"))
def test_lobs_without_convert_many_rows(self):
engine = testing_engine(
options=dict(auto_convert_lobs=False, arraysize=1)
)
result = exec_sql(
engine.connect(),
"select id, data, bindata from z_test order by id",
)
results = result.fetchall()
def go():
actual = []
for row in results:
data, bindata = self._read_lob(engine, row)
actual.append(
dict(id=row._mapping["id"], data=data, bindata=bindata)
)
eq_(actual, self.data)
go()
def test_lobs_with_convert_many_rows(self):
# even with low arraysize, lobs are fine in autoconvert
engine = testing_engine(
options=dict(auto_convert_lobs=True, arraysize=1)
)
with engine.connect() as conn:
result = exec_sql(
conn,
"select id, data, bindata from z_test order by id",
)
results = result.fetchall()
eq_(
[
dict(
id=row._mapping["id"],
data=row._mapping["data"],
bindata=row._mapping["bindata"],
)
for row in results
],
self.data,
)
@testing.combinations(
(UnicodeText(),), (Text(),), (LargeBinary(),), argnames="datatype"
)
@testing.combinations((10,), (100,), (250,), argnames="datasize")
@testing.combinations(
("x,y,z"), ("y"), ("y,x,z"), ("x,z,y"), argnames="retcols"
)
def test_insert_returning_w_lobs(
self, datatype, datasize, retcols, metadata, connection
):
long_text = Table(
"long_text",
metadata,
Column("x", Integer),
Column("y", datatype),
Column("z", Integer),
)
long_text.create(connection)
if isinstance(datatype, UnicodeText):
word_seed = "ab🐍’«cdefg"
else:
word_seed = "abcdef"
some_text = " ".join(
"".join(random.choice(word_seed) for j in range(150))
for i in range(datasize)
)
if isinstance(datatype, LargeBinary):
some_text = some_text.encode("ascii")
data = {"x": 5, "y": some_text, "z": 10}
return_columns = [long_text.c[col] for col in retcols.split(",")]
expected = tuple(data[col] for col in retcols.split(","))
result = connection.execute(
long_text.insert().returning(*return_columns),
data,
)
eq_(result.fetchall(), [expected])
def test_insert_returning_w_unicode(self, metadata, connection):
long_text = Table(
"long_text",
metadata,
Column("x", Integer),
Column("y", Unicode(255)),
)
long_text.create(connection)
word_seed = "ab🐍’«cdefg"
some_text = " ".join(
"".join(random.choice(word_seed) for j in range(10))
for i in range(15)
)
data = {"x": 5, "y": some_text}
result = connection.execute(
long_text.insert().returning(long_text.c.y),
data,
)
eq_(result.fetchall(), [(some_text,)])
def test_large_stream(self, connection):
binary_table = self.tables.binary_table
result = connection.execute(
binary_table.select().order_by(binary_table.c.id)
).fetchall()
eq_(result, [(i, self.stream) for i in range(1, 11)])
def test_large_stream_single_arraysize(self):
binary_table = self.tables.binary_table
eng = testing_engine(options={"arraysize": 1})
with eng.connect() as conn:
result = conn.execute(
binary_table.select().order_by(binary_table.c.id)
).fetchall()
eq_(result, [(i, self.stream) for i in range(1, 11)])
| LOBFetchTest |
python | openai__openai-python | src/openai/types/realtime/response_mcp_call_arguments_done.py | {
"start": 205,
"end": 708
} | class ____(BaseModel):
arguments: str
"""The final JSON-encoded arguments string."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP tool call item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.mcp_call_arguments.done"]
"""The event type, must be `response.mcp_call_arguments.done`."""
| ResponseMcpCallArgumentsDone |
python | pypa__pip | tests/unit/test_utils_compatibility_tags.py | {
"start": 622,
"end": 1579
} | class ____:
def mock_get_config_var(self, **kwd: str) -> Callable[[str], Any]:
"""
Patch sysconfig.get_config_var for arbitrary keys.
"""
get_config_var = sysconfig.get_config_var
def _mock_get_config_var(var: str) -> Any:
if var in kwd:
return kwd[var]
return get_config_var(var)
return _mock_get_config_var
def test_no_hyphen_tag(self) -> None:
"""
Test that no tag contains a hyphen.
"""
import pip._internal.utils.compatibility_tags
mock_gcf = self.mock_get_config_var(SOABI="cpython-35m-darwin")
with patch("sysconfig.get_config_var", mock_gcf):
supported = pip._internal.utils.compatibility_tags.get_supported()
for tag in supported:
assert "-" not in tag.interpreter
assert "-" not in tag.abi
assert "-" not in tag.platform
| Testcompatibility_tags |
python | django__django | tests/generic_relations_regress/tests.py | {
"start": 551,
"end": 13936
} | class ____(TestCase):
def test_inherited_models_content_type(self):
"""
GenericRelations on inherited classes use the correct content type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
The correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name="Chef")
Address.objects.create(
street="123 Anywhere Place",
city="Conifer",
state="CO",
zipcode="80433",
content_object=p,
)
qs = Person.objects.filter(addresses__zipcode="80433")
self.assertEqual(1, qs.count())
self.assertEqual("Chef", qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name="clink")
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name="tlink")
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_charlink_filter(self):
oddrel = OddRelation1.objects.create(name="clink")
CharLink.objects.create(content_object=oddrel, value="value")
self.assertSequenceEqual(
OddRelation1.objects.filter(clinks__value="value"), [oddrel]
)
def test_textlink_filter(self):
oddrel = OddRelation2.objects.create(name="clink")
TextLink.objects.create(content_object=oddrel, value="value")
self.assertSequenceEqual(
OddRelation2.objects.filter(tlinks__value="value"), [oddrel]
)
def test_coerce_object_id_remote_field_cache_persistence(self):
restaurant = Restaurant.objects.create()
CharLink.objects.create(content_object=restaurant)
charlink = CharLink.objects.latest("pk")
self.assertIs(charlink.content_object, charlink.content_object)
# If the model (Cafe) uses more than one level of multi-table
# inheritance.
cafe = Cafe.objects.create()
CharLink.objects.create(content_object=cafe)
charlink = CharLink.objects.latest("pk")
self.assertIs(charlink.content_object, charlink.content_object)
def test_q_object_or(self):
"""
SQL query parameters for generic relations are properly
grouped when OR is used (#11535).
In this bug the first query (below) works while the second, with the
query parameters the same but in reverse order, does not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note="note", content_object=note_contact)
org = Organization.objects.create(name="org name")
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(
Q(notes__note__icontains=r"other note")
| Q(organizations__name__icontains=r"org name")
)
self.assertIn(org_contact, qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r"org name")
| Q(notes__note__icontains=r"other note")
)
self.assertIn(org_contact, qs)
def test_join_reuse(self):
qs = Person.objects.filter(addresses__street="foo").filter(
addresses__street="bar"
)
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_generic_relation_ordering(self):
"""
Ordering over a generic relation does not include extraneous
duplicate results, nor excludes rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by("links__id"))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_len_zero(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__len__ method returns 0 (Team.__len__() here) shouldn't fail (#13085).
"""
team1 = Team.objects.create(name="Backend devs")
note = Note(note="Deserve a bonus", content_object=team1)
note.save()
def test_target_model_bool_false(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__bool__ method returns False (Guild.__bool__() here) shouldn't fail
(#13085).
"""
g1 = Guild.objects.create(name="First guild")
note = Note(note="Note for guild", content_object=g1)
note.save()
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name="")
tag = Tag(label="VP", content_object=b1)
tag.save()
def test_ticket_20378(self):
# Create a couple of extra HasLinkThing so that the autopk value
# isn't the same for Link and HasLinkThing.
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
hs3 = HasLinkThing.objects.create()
hs4 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs3)
l2 = Link.objects.create(content_object=hs4)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l1), [hs3])
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l2), [hs4])
self.assertSequenceEqual(
HasLinkThing.objects.exclude(links=l2), [hs1, hs2, hs3]
)
self.assertSequenceEqual(
HasLinkThing.objects.exclude(links=l1), [hs1, hs2, hs4]
)
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(C.objects.filter(b__a__flag=None), [c1, c3])
self.assertSequenceEqual(C.objects.exclude(b__a__flag=None), [c2])
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(D.objects.exclude(b__a__flag=None), [d2])
self.assertSequenceEqual(D.objects.filter(b__a__flag=None), [d1, d3, d4])
self.assertSequenceEqual(B.objects.filter(a__flag=None), [b1, b3])
self.assertSequenceEqual(B.objects.exclude(a__flag=None), [b2])
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn(
"content_type_id", str(B.objects.exclude(a__flag=None).query).lower()
)
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn(
"content_type_id", str(B.objects.exclude(a__flag=True).query).lower()
)
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
link = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum("links")).filter(pk=hs1.pk)
# If content_type restriction isn't in the query's join condition,
# then wrong results are produced here as the link to b will also match
# (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, link.id)
link.delete()
# Now if we don't have proper left join, we will not produce any
# results at all here.
# clear cached results
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
# Use hardcoded PKs to ensure different PKs for "link" and "hs2"
# objects.
HasLinkThing.objects.create(pk=1)
hs2 = HasLinkThing.objects.create(pk=2)
link = Link.objects.create(content_object=hs2, pk=1)
self.assertNotEqual(link.object_id, link.pk)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=link.pk), [hs2])
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields="__all__")
form = GenericRelationForm()
self.assertIn("links", form.fields)
form = GenericRelationForm({"links": None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field("links")
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
    """The generic related manager's repr names its manager class."""
    spot = Place.objects.create(name="My Place")
    self.assertIn("GenericRelatedObjectManager", str(spot.links))
def test_filter_on_related_proxy_model(self):
    """A GenericRelation pointing at a proxy model is usable in lookups."""
    target = Place.objects.create()
    Link.objects.create(content_object=target)
    found = Place.objects.get(link_proxy__object_id=target.id)
    self.assertEqual(found, target)
def test_generic_reverse_relation_with_mti(self):
    """
    Filtering with a reverse generic relation, where the GenericRelation
    comes from multi-table inheritance.
    """
    parent = Place.objects.create(name="Test Place")
    created = Link.objects.create(content_object=parent)
    matches = Link.objects.filter(places=parent)
    self.assertCountEqual(matches, [created])
def test_generic_reverse_relation_with_abc(self):
    """
    The reverse generic relation accessor (targets) is created if the
    GenericRelation comes from an abstract base model (HasLinks).
    """
    owner = HasLinkThing.objects.create()
    created = Link.objects.create(content_object=owner)
    self.assertCountEqual(created.targets.all(), [owner])
def test_generic_reverse_relation_exclude_filter(self):
    """Negating across a reverse generic relation gives the same result
    whether expressed as filter(~Q(...)) or exclude(...)."""
    first = Place.objects.create(name="Test Place 1")
    second = Place.objects.create(name="Test Place 2")
    Link.objects.create(content_object=first)
    kept = Link.objects.create(content_object=second)
    self.assertSequenceEqual(
        Link.objects.filter(~Q(places__name="Test Place 1")), [kept]
    )
    self.assertSequenceEqual(
        Link.objects.exclude(places__name="Test Place 1"), [kept]
    )
def test_check_cached_value_pk_different_type(self):
    """Primary key is not checked if the content type doesn't match."""
    board = Board.objects.create(name="some test")
    oddrel = OddRelation1.objects.create(name="clink")
    created = CharLink.objects.create(content_object=oddrel)
    row = CharLink.objects.get(pk=created.pk)
    self.assertEqual(row.content_object, oddrel)
    # Repoint the generic FK at a Board: the cached OddRelation1 must be
    # dropped even if the pk value alone would still appear to match.
    row.object_id = board.pk
    row.content_type_id = ContentType.objects.get_for_model(Board).id
    self.assertEqual(row.content_object, board)
| GenericRelationTests |
python | pytorch__pytorch | test/export/test_export.py | {
"start": 15894,
"end": 96401
} | class ____(TestCase):
def _test_export_same_as_eager(self, f, args, kwargs=None):
    """Export *f* and assert the exported module reproduces eager output
    for the given args/kwargs."""
    if not kwargs:
        kwargs = {}
    exported_program = export(f, args, kwargs)
    expected = f(*args, **kwargs)
    self.assertEqual(exported_program.module()(*args, **kwargs), expected)
    # this is not supported by .module()
    # reversed_kwargs = {key: kwargs[key] for key in reversed(kwargs)}
    # self.assertEqual(
    #     exported_program.module()(*args, **reversed_kwargs), f(*args, **reversed_kwargs)
    # )
def _check_dynamic_shapes_specs_and_shapes(
    self,
    model,
    inputs,
    specs,
    passing_shapes,
    failing_shapes,
    test_serdes=False,
):
    """Export *model* once per entry of *specs* (all specs are assumed
    equivalent), then run each exported module on every shape pytree in
    *passing_shapes* (must succeed) and *failing_shapes* (must fail a
    guard with "Guard failed").

    Shape pytrees encode tensor leaves as tuples of ints; they are
    materialized with torch.randn. With test_serdes=True, each spec is
    also round-tripped through the dynamic-shapes serializer and the
    resulting export re-checked against the same shapes.
    """
    from torch._export.serde.dynamic_shapes import (
        _dump_dynamic_shapes,
        _load_dynamic_shapes,
    )
    from torch.utils._pytree import tree_map

    def _construct_inputs(shapes):
        # A "tensor leaf" is encoded as a tuple of ints (its shape);
        # everything else in the pytree passes through unchanged.
        def _is_tensor_leaf(x):
            return isinstance(x, tuple) and all(isinstance(y, int) for y in x)

        return tree_map(
            lambda x: torch.randn(*x) if _is_tensor_leaf(x) else x,
            shapes,
            is_leaf=_is_tensor_leaf,
        )

    # exports with a list of equivalent dynamic shapes specs,
    # then tests for pass/fail on list of shapes
    for _specs in specs:
        ep = export(model, inputs, dynamic_shapes=_specs)
        eps = [ep]
        if test_serdes:
            # test dynamic shapes serialization
            # test that behavior remains the same when exporting with Ser/Des specs:
            # serialize + deserialize original specs, and export.
            ep_serdes = export(
                model,
                inputs,
                dynamic_shapes=_load_dynamic_shapes(
                    _dump_dynamic_shapes(_specs, inputs)
                ),
            )
            eps.append(ep_serdes)

        for ep in eps:
            for shapes in passing_shapes:
                test_inputs = _construct_inputs(shapes)
                ep.module()(*test_inputs)
            for shapes in failing_shapes:
                test_inputs = _construct_inputs(shapes)
                with self.assertRaisesRegex(AssertionError, "Guard failed"):
                    ep.module()(*test_inputs)
def test_basic(self):
    """Smoke test: export a module that indexes into a list input."""
    class Module(torch.nn.Module):
        def forward(self, x, y):
            return x[0] + y

    example = ([torch.ones(1, 3)], torch.ones(1, 3))
    self._test_export_same_as_eager(Module(), example)
@testing.expectedFailureStrictV2
@skipIfCrossRef
def test_custom_tag_metadata_re_export(self):
    """Custom metadata set on the graph module and on a single node
    survives re-export, and the node-level tag is not copied to any
    other node."""
    class Foo(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.w = torch.nn.Parameter(torch.rand(4, 2))
            self.b = torch.nn.Parameter(torch.rand(4))

        def forward(self, x):
            out = torch.nn.functional.linear(x, self.w, self.b)
            return out

    f = Foo()
    inputs = (torch.zeros(1, 2),)
    ep = export(f, inputs)

    # Tag a copy of the graph module out-of-band: one module-level tag
    # and one tag on the single aten.linear node.
    new_gm = copy.deepcopy(ep.graph_module)
    new_gm.meta["custom"] = {}
    new_gm.meta["custom"]["f"] = "bar"

    for node in new_gm.graph.nodes:
        if (
            node.op == "call_function"
            and node.target == torch.ops.aten.linear.default
        ):
            node.meta["custom"] = {}
            node.meta["custom"]["quantization_tag"] = "foo"

    # Swap the tagged graph back into the ExportedProgram, then re-export.
    new_ep = ep._update(new_gm, ep.graph_signature)
    new_ep = export(new_ep.module(), inputs)
    self.assertEqual(new_ep.graph_module.meta["custom"]["f"], "bar")

    # the custom field should be preserved after re-export and
    # should not be copied to other nodes
    counter = 0
    for node in new_ep.graph.nodes:
        if "custom" in node.meta:
            counter += 1
            self.assertTrue(node.meta["custom"]["quantization_tag"] == "foo")
            self.assertTrue(node.target == torch.ops.aten.linear.default)
    self.assertEqual(counter, 1)
@testing.expectedFailureSerDer  # can't serialize functorch ops
@testing.expectedFailureSerDerNonStrict  # can't serialize functorch ops
def test_vmap_to_assert(self):
    """torch.vmap inside forward exports and matches eager execution."""
    class VmapToAssert(torch.nn.Module):
        def forward(self, x, y):
            def per_sample(x, y):
                prod = (x * y).to("cpu", memory_format=torch.channels_last)
                return (prod + 1).sum(dim=0)

            return torch.vmap(per_sample)(x, y).sum(dim=0)

    ep = export(VmapToAssert(), (torch.zeros(4, 4, 4, 4), torch.zeros(4, 4, 4, 4)))
    ones = (torch.ones(4, 4, 4, 4), torch.ones(4, 4, 4, 4))
    self.assertEqual(ep.module()(*ones), VmapToAssert()(*ones))
def test_from_node_metadata_export(self):
    """Unflattened export graphs carry "from_node" provenance metadata:
    lifted parameter/buffer nodes are tagged by the unlift() pass
    (created then replaced), all other compute nodes by
    ExportedProgram.module() (created), each pointing back at the
    originating ExportedProgram graph.

    Previously the verification loop was duplicated verbatim for the
    first export and the re-export; it is factored into a local helper.
    """
    from torch.fx.traceback import NodeSourceAction

    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1d = torch.nn.Conv1d(3, 3, 3)
            self.conv2d = torch.nn.Conv2d(3, 3, 3)

        def forward(self, x):
            x = self.conv2d(x)
            x = x.squeeze(0)
            x = self.conv1d(x)
            return x

        def example_inputs(self):
            return

    def _assert_provenance(gm, graph_id):
        # Shared check: every non-structural node must record where it
        # came from and which graph produced it.
        for node in gm.graph.nodes:
            if node.op in ("placeholder", "output", "call_module"):
                continue
            if "weight" in node.name or "bias" in node.name:
                # Params/buffers are re-inserted by unlift(): first
                # created, then replacing the lifted placeholder.
                self.assertTrue(
                    node.meta["from_node"][-1].pass_name
                    == "ExportedProgram.module().unlift()"
                )
                self.assertTrue(
                    node.meta["from_node"][-1].action
                    == [NodeSourceAction.CREATE, NodeSourceAction.REPLACE]
                )
                self.assertEqual(
                    node.meta["from_node"][-1].from_node[-1].graph_id, graph_id
                )
            else:
                self.assertTrue(
                    node.meta["from_node"][-1].pass_name
                    == "ExportedProgram.module()"
                )
                self.assertTrue(
                    node.meta["from_node"][-1].action == [NodeSourceAction.CREATE]
                )
                self.assertEqual(node.meta["from_node"][-1].graph_id, graph_id)

    inputs = (torch.randn(1, 3, 5, 5),)
    ep = export(Foo(), inputs)
    gm = ep.module()
    _assert_provenance(gm, id(ep.graph))

    ## re-export: provenance must be rebuilt against the new graph.
    ep2 = export(gm, inputs)
    _assert_provenance(ep2.module(), id(ep2.graph))
def test_annotate_on_assert(self):
    """Nodes synthesized by the runtime-assertion pass (ge_1,
    _assert_scalar_default below) inherit the user's fx.traceback
    annotation from the enclosing `annotate` block."""
    # nodes added in `apply_runtime_assertion_pass` will be annotated
    class M(torch.nn.Module):
        def forward(self, x, y):
            with torch.fx.traceback.annotate({"moo": 0}):
                x = torch.cat([x, x])
                b = y.item()
                torch._check(b >= x.shape[0])
            return x * b

    with torch.fx.traceback.preserve_node_meta():
        ep = torch.export.export(
            M(),
            (torch.randn(3), torch.tensor(6)),
            dynamic_shapes={"x": {0: Dim("b")}, "y": None},
        )

    # clean up _torchdynamo related meta data as it could vary depending on the caller
    # https://github.com/pytorch/pytorch/issues/167432
    for node in ep.graph.nodes:
        if "custom" in node.meta:
            node.meta["custom"] = {
                k: v
                for k, v in node.meta["custom"].items()
                if "_torchdynamo_disable" not in k
            }

    custom_metadata = torch.fx.traceback._get_custom_metadata(ep.module())
    self.assertExpectedInline(
        str(custom_metadata),
        """\
('call_function', 'cat', {'moo': 0})
('call_function', 'item', {'moo': 0})
('call_function', 'ge_1', {'moo': 0})
('call_function', '_assert_scalar_default', {'moo': 0})
('call_function', 'mul', {'moo': 0})""",
    )
@requires_gpu
def test_flex_attention_export(self):
    """Export flex_attention where create_block_mask's mask_mod closes over
    a tensor from export's fake mode while create_block_mask internally
    calls torch.compile (its own nested fake mode); the two modes must mix
    without error and the exported module must match eager output."""
    from torch.nn.attention.flex_attention import create_block_mask, flex_attention

    class MixedFakeModeModel(torch.nn.Module):
        def __init__(self, dim=64, use_inductor=True):
            super().__init__()
            self.dim = dim
            self.q_proj = torch.nn.Linear(64, 64)
            self.k_proj = torch.nn.Linear(64, 64)
            self.v_proj = torch.nn.Linear(64, 64)
            self.use_inductor = use_inductor

        def forward(self, x):
            batch_size, seq_len, _ = x.shape
            # Process input first - this creates fake tensors in export's fake mode
            processed = self.q_proj(x)
            # Create some computation that depends on processed tensor
            intermediate = processed.sum(dim=-1).detach()  # Shape: (batch, seq_len)

            # Now call create_block_mask which internally calls torch.compile
            # The mask function will capture 'intermediate' which is a fake tensor
            # from export's fake mode, but create_block_mask will create its own fake mode
            def dynamic_mask_function(batch_idx, head_idx, q_idx, kv_idx):
                # This captures the intermediate tensor from the outer scope
                # When torch.compile is called inside create_block_mask,
                # this tensor will be from export's fake mode while new tensors
                # created inside will be from the nested fake mode
                threshold = intermediate[
                    batch_idx, q_idx % seq_len
                ]  # Access the captured tensor
                return (kv_idx <= q_idx) & (threshold > 0)  # Mix fake modes

            block_mask = create_block_mask(
                mask_mod=dynamic_mask_function,
                B=batch_size,
                H=None,
                Q_LEN=seq_len,
                KV_LEN=seq_len,
                device=x.device,
            )

            q = self.q_proj(processed).view(batch_size, 1, seq_len, self.dim)
            k = self.k_proj(processed).view(batch_size, 1, seq_len, self.dim)
            v = self.v_proj(processed).view(batch_size, 1, seq_len, self.dim)

            # Use flex_attention with the problematic block_mask
            backend = "inductor" if self.use_inductor else "eager"
            out = torch.compile(flex_attention, backend=backend)(
                q, k, v, block_mask=block_mask
            )
            return out

    model = MixedFakeModeModel(use_inductor=False)
    x = torch.randn(2, 128, 64)
    # Inductor doesn't work in eager mode flex attention
    eager_out = model(x)
    model.use_inductor = True
    exported_mod = torch.export.export(model, (x,), strict=False).module()
    # Pin the exact traced code: block-mask construction is fully inlined
    # as vmap/predispatch ops feeding the flex_attention HOP.
    self.assertExpectedInline(
        str(exported_mod.code).strip(),
        """\
def forward(self, x):
    x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    q_proj_weight = self.q_proj.weight
    q_proj_bias = self.q_proj.bias
    k_proj_weight = self.k_proj.weight
    k_proj_bias = self.k_proj.bias
    v_proj_weight = self.v_proj.weight
    v_proj_bias = self.v_proj.bias
    _guards_fn = self._guards_fn(x); _guards_fn = None
    linear = torch.ops.aten.linear.default(x, q_proj_weight, q_proj_bias); x = None
    sum_1 = torch.ops.aten.sum.dim_IntList(linear, [-1])
    detach = torch.ops.aten.detach.default(sum_1); sum_1 = None
    arange = torch.ops.aten.arange.start(0, 2, device = device(type='cpu'), pin_memory = False)
    arange_1 = torch.ops.aten.arange.start(0, 1, device = device(type='cpu'), pin_memory = False)
    arange_2 = torch.ops.aten.arange.start(0, 128, device = device(type='cpu'), pin_memory = False)
    arange_3 = torch.ops.aten.arange.start(0, 128, device = device(type='cpu'), pin_memory = False)
    lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
    _vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(2, 'error'); _vmap_increment_nesting = None
    _add_batch_dim = torch._functorch.predispatch._add_batch_dim(arange, 0, 1); arange = None
    lazy_load_decompositions_1 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_1 = None
    _vmap_increment_nesting_1 = torch._functorch.predispatch._vmap_increment_nesting(1, 'error'); _vmap_increment_nesting_1 = None
    _add_batch_dim_1 = torch._functorch.predispatch._add_batch_dim(arange_1, 0, 2); arange_1 = _add_batch_dim_1 = None
    lazy_load_decompositions_2 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_2 = None
    _vmap_increment_nesting_2 = torch._functorch.predispatch._vmap_increment_nesting(128, 'error'); _vmap_increment_nesting_2 = None
    _add_batch_dim_2 = torch._functorch.predispatch._add_batch_dim(arange_2, 0, 3); arange_2 = None
    lazy_load_decompositions_3 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_3 = None
    _vmap_increment_nesting_3 = torch._functorch.predispatch._vmap_increment_nesting(128, 'error'); _vmap_increment_nesting_3 = None
    _add_batch_dim_3 = torch._functorch.predispatch._add_batch_dim(arange_3, 0, 4); arange_3 = None
    remainder = torch.ops.aten.remainder.Scalar(_add_batch_dim_2, 128)
    torch__dynamo__trace_wrapped_higher_order_op_mod_index0 = self.torch__dynamo__trace_wrapped_higher_order_op_ModIndex0
    function_const_func_spec0 = self.function_const_func_spec0
    flat_apply = torch.ops.higher_order.flat_apply(function_const_func_spec0, torch__dynamo__trace_wrapped_higher_order_op_mod_index0, 'torch._dynamo._trace_wrapped_higher_order_op.ModIndex', detach, _add_batch_dim, remainder); function_const_func_spec0 = torch__dynamo__trace_wrapped_higher_order_op_mod_index0 = _add_batch_dim = remainder = None
    le = torch.ops.aten.le.Tensor(_add_batch_dim_3, _add_batch_dim_2); _add_batch_dim_3 = _add_batch_dim_2 = None
    gt = torch.ops.aten.gt.Scalar(flat_apply, 0); flat_apply = None
    and_1 = torch.ops.aten.__and__.Tensor(le, gt); le = gt = None
    _remove_batch_dim = torch._functorch.predispatch._remove_batch_dim(and_1, 4, 128, 0); and_1 = None
    _vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
    _remove_batch_dim_1 = torch._functorch.predispatch._remove_batch_dim(_remove_batch_dim, 3, 128, 0); _remove_batch_dim = None
    _vmap_decrement_nesting_1 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_1 = None
    _remove_batch_dim_2 = torch._functorch.predispatch._remove_batch_dim(_remove_batch_dim_1, 2, 1, 0)
    expand = torch.ops.aten.expand.default(_remove_batch_dim_1, [1, 128, 128]); _remove_batch_dim_1 = expand = None
    _vmap_decrement_nesting_2 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_2 = None
    _remove_batch_dim_3 = torch._functorch.predispatch._remove_batch_dim(_remove_batch_dim_2, 1, 2, 0); _remove_batch_dim_2 = None
    _vmap_decrement_nesting_3 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_3 = None
    pad = torch.ops.aten.pad.default(_remove_batch_dim_3, [0, 0, 0, 0]); _remove_batch_dim_3 = None
    view = torch.ops.aten.view.default(pad, [2, 1, 1, 128, 1, 128]); pad = None
    permute = torch.ops.aten.permute.default(view, [0, 1, 2, 4, 3, 5]); view = None
    sum_2 = torch.ops.aten.sum.dim_IntList(permute, [-2, -1]); permute = None
    eq = torch.ops.aten.eq.Scalar(sum_2, 16384)
    gt_1 = torch.ops.aten.gt.Scalar(sum_2, 0)
    lt = torch.ops.aten.lt.Scalar(sum_2, 16384); sum_2 = None
    and_2 = torch.ops.aten.__and__.Tensor(gt_1, lt); gt_1 = lt = None
    _assert_tensor_metadata_default = torch.ops.aten._assert_tensor_metadata.default(and_2, dtype = torch.bool, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default = None
    to = torch.ops.aten.to.dtype(and_2, torch.int8); and_2 = None
    _assert_tensor_metadata_default_1 = torch.ops.aten._assert_tensor_metadata.default(eq, dtype = torch.bool, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_1 = None
    to_1 = torch.ops.aten.to.dtype(eq, torch.int8); eq = None
    _assert_tensor_metadata_default_2 = torch.ops.aten._assert_tensor_metadata.default(to, dtype = torch.int8, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_2 = None
    to_2 = torch.ops.aten.to.dtype(to, torch.int32); to = None
    sum_3 = torch.ops.aten.sum.dim_IntList(to_2, [-1])
    argsort = torch.ops.aten.argsort.stable(to_2, stable = True, descending = True); to_2 = None
    _assert_tensor_metadata_default_3 = torch.ops.aten._assert_tensor_metadata.default(sum_3, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_3 = None
    to_3 = torch.ops.aten.to.dtype(sum_3, torch.int32, False, False, torch.contiguous_format); sum_3 = None
    _assert_tensor_metadata_default_4 = torch.ops.aten._assert_tensor_metadata.default(argsort, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_4 = None
    to_4 = torch.ops.aten.to.dtype(argsort, torch.int32, False, False, torch.contiguous_format); argsort = None
    _assert_tensor_metadata_default_5 = torch.ops.aten._assert_tensor_metadata.default(to_1, dtype = torch.int8, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_5 = None
    to_5 = torch.ops.aten.to.dtype(to_1, torch.int32); to_1 = None
    sum_4 = torch.ops.aten.sum.dim_IntList(to_5, [-1])
    argsort_1 = torch.ops.aten.argsort.stable(to_5, stable = True, descending = True); to_5 = None
    _assert_tensor_metadata_default_6 = torch.ops.aten._assert_tensor_metadata.default(sum_4, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_6 = None
    to_6 = torch.ops.aten.to.dtype(sum_4, torch.int32, False, False, torch.contiguous_format); sum_4 = None
    _assert_tensor_metadata_default_7 = torch.ops.aten._assert_tensor_metadata.default(argsort_1, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_7 = None
    to_7 = torch.ops.aten.to.dtype(argsort_1, torch.int32, False, False, torch.contiguous_format); argsort_1 = None
    lazy_load_decompositions_4 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_4 = None
    _vmap_increment_nesting_4 = torch._functorch.predispatch._vmap_increment_nesting(2, 'error'); _vmap_increment_nesting_4 = None
    _add_batch_dim_4 = torch._functorch.predispatch._add_batch_dim(to_3, 0, 1)
    _add_batch_dim_5 = torch._functorch.predispatch._add_batch_dim(to_4, 0, 1)
    lazy_load_decompositions_5 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_5 = None
    _vmap_increment_nesting_5 = torch._functorch.predispatch._vmap_increment_nesting(1, 'error'); _vmap_increment_nesting_5 = None
    _add_batch_dim_6 = torch._functorch.predispatch._add_batch_dim(_add_batch_dim_4, 0, 2); _add_batch_dim_4 = None
    _add_batch_dim_7 = torch._functorch.predispatch._add_batch_dim(_add_batch_dim_5, 0, 2); _add_batch_dim_5 = None
    new_zeros = torch.ops.aten.new_zeros.default(_add_batch_dim_7, [1, 2], dtype = torch.int32, pin_memory = False)
    arange_4 = torch.ops.aten.arange.default(1, dtype = torch.int32, device = device(type='cpu'), pin_memory = False)
    unsqueeze = torch.ops.aten.unsqueeze.default(arange_4, -1); arange_4 = None
    arange_5 = torch.ops.aten.arange.default(1, dtype = torch.int32, device = device(type='cpu'), pin_memory = False)
    unsqueeze_1 = torch.ops.aten.unsqueeze.default(_add_batch_dim_6, -1); _add_batch_dim_6 = None
    lt_1 = torch.ops.aten.lt.Tensor(arange_5, unsqueeze_1); arange_5 = unsqueeze_1 = None
    where = torch.ops.aten.where.ScalarOther(lt_1, _add_batch_dim_7, 1); lt_1 = _add_batch_dim_7 = None
    new_ones = torch.ops.aten.new_ones.default(new_zeros, [], pin_memory = False)
    index_put_ = torch.ops.aten.index_put_.default(new_zeros, [unsqueeze, where], new_ones); new_zeros = unsqueeze = where = new_ones = None
    slice_1 = torch.ops.aten.slice.Tensor(index_put_, 1, 0, 1); index_put_ = None
    _remove_batch_dim_4 = torch._functorch.predispatch._remove_batch_dim(slice_1, 2, 1, 0); slice_1 = None
    _vmap_decrement_nesting_4 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_4 = None
    _remove_batch_dim_5 = torch._functorch.predispatch._remove_batch_dim(_remove_batch_dim_4, 1, 2, 0); _remove_batch_dim_4 = None
    _vmap_decrement_nesting_5 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_5 = None
    transpose = torch.ops.aten.transpose.int(_remove_batch_dim_5, -2, -1); _remove_batch_dim_5 = None
    _assert_tensor_metadata_default_8 = torch.ops.aten._assert_tensor_metadata.default(transpose, dtype = torch.int32, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_8 = None
    to_8 = torch.ops.aten.to.dtype(transpose, torch.int32); transpose = None
    sum_5 = torch.ops.aten.sum.dim_IntList(to_8, [-1])
    argsort_2 = torch.ops.aten.argsort.stable(to_8, stable = True, descending = True); to_8 = None
    _assert_tensor_metadata_default_9 = torch.ops.aten._assert_tensor_metadata.default(sum_5, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_9 = None
    to_9 = torch.ops.aten.to.dtype(sum_5, torch.int32, False, False, torch.contiguous_format); sum_5 = None
    _assert_tensor_metadata_default_10 = torch.ops.aten._assert_tensor_metadata.default(argsort_2, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_10 = None
    to_10 = torch.ops.aten.to.dtype(argsort_2, torch.int32, False, False, torch.contiguous_format); argsort_2 = None
    lazy_load_decompositions_6 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_6 = None
    _vmap_increment_nesting_6 = torch._functorch.predispatch._vmap_increment_nesting(2, 'error'); _vmap_increment_nesting_6 = None
    _add_batch_dim_8 = torch._functorch.predispatch._add_batch_dim(to_6, 0, 1)
    _add_batch_dim_9 = torch._functorch.predispatch._add_batch_dim(to_7, 0, 1)
    lazy_load_decompositions_7 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_7 = None
    _vmap_increment_nesting_7 = torch._functorch.predispatch._vmap_increment_nesting(1, 'error'); _vmap_increment_nesting_7 = None
    _add_batch_dim_10 = torch._functorch.predispatch._add_batch_dim(_add_batch_dim_8, 0, 2); _add_batch_dim_8 = None
    _add_batch_dim_11 = torch._functorch.predispatch._add_batch_dim(_add_batch_dim_9, 0, 2); _add_batch_dim_9 = None
    new_zeros_1 = torch.ops.aten.new_zeros.default(_add_batch_dim_11, [1, 2], dtype = torch.int32, pin_memory = False)
    arange_6 = torch.ops.aten.arange.default(1, dtype = torch.int32, device = device(type='cpu'), pin_memory = False)
    unsqueeze_2 = torch.ops.aten.unsqueeze.default(arange_6, -1); arange_6 = None
    arange_7 = torch.ops.aten.arange.default(1, dtype = torch.int32, device = device(type='cpu'), pin_memory = False)
    unsqueeze_3 = torch.ops.aten.unsqueeze.default(_add_batch_dim_10, -1); _add_batch_dim_10 = None
    lt_2 = torch.ops.aten.lt.Tensor(arange_7, unsqueeze_3); arange_7 = unsqueeze_3 = None
    where_1 = torch.ops.aten.where.ScalarOther(lt_2, _add_batch_dim_11, 1); lt_2 = _add_batch_dim_11 = None
    new_ones_1 = torch.ops.aten.new_ones.default(new_zeros_1, [], pin_memory = False)
    index_put__1 = torch.ops.aten.index_put_.default(new_zeros_1, [unsqueeze_2, where_1], new_ones_1); new_zeros_1 = unsqueeze_2 = where_1 = new_ones_1 = None
    slice_2 = torch.ops.aten.slice.Tensor(index_put__1, 1, 0, 1); index_put__1 = None
    _remove_batch_dim_6 = torch._functorch.predispatch._remove_batch_dim(slice_2, 2, 1, 0); slice_2 = None
    _vmap_decrement_nesting_6 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_6 = None
    _remove_batch_dim_7 = torch._functorch.predispatch._remove_batch_dim(_remove_batch_dim_6, 1, 2, 0); _remove_batch_dim_6 = None
    _vmap_decrement_nesting_7 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_7 = None
    transpose_1 = torch.ops.aten.transpose.int(_remove_batch_dim_7, -2, -1); _remove_batch_dim_7 = None
    _assert_tensor_metadata_default_11 = torch.ops.aten._assert_tensor_metadata.default(transpose_1, dtype = torch.int32, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_11 = None
    to_11 = torch.ops.aten.to.dtype(transpose_1, torch.int32); transpose_1 = None
    sum_6 = torch.ops.aten.sum.dim_IntList(to_11, [-1])
    argsort_3 = torch.ops.aten.argsort.stable(to_11, stable = True, descending = True); to_11 = None
    _assert_tensor_metadata_default_12 = torch.ops.aten._assert_tensor_metadata.default(sum_6, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_12 = None
    to_12 = torch.ops.aten.to.dtype(sum_6, torch.int32, False, False, torch.contiguous_format); sum_6 = None
    _assert_tensor_metadata_default_13 = torch.ops.aten._assert_tensor_metadata.default(argsort_3, dtype = torch.int64, device = device(type='cpu'), layout = torch.strided); _assert_tensor_metadata_default_13 = None
    to_13 = torch.ops.aten.to.dtype(argsort_3, torch.int32, False, False, torch.contiguous_format); argsort_3 = None
    linear_1 = torch.ops.aten.linear.default(linear, q_proj_weight, q_proj_bias); q_proj_weight = q_proj_bias = None
    view_1 = torch.ops.aten.view.default(linear_1, [2, 1, 128, 64]); linear_1 = None
    linear_2 = torch.ops.aten.linear.default(linear, k_proj_weight, k_proj_bias); k_proj_weight = k_proj_bias = None
    view_2 = torch.ops.aten.view.default(linear_2, [2, 1, 128, 64]); linear_2 = None
    linear_3 = torch.ops.aten.linear.default(linear, v_proj_weight, v_proj_bias); linear = v_proj_weight = v_proj_bias = None
    view_3 = torch.ops.aten.view.default(linear_3, [2, 1, 128, 64]); linear_3 = None
    sdpa_score0 = self.sdpa_score0
    sdpa_mask0 = self.sdpa_mask0
    flex_attention = torch.ops.higher_order.flex_attention(view_1, view_2, view_3, sdpa_score0, (128, 128, to_3, to_4, to_6, to_7, to_9, to_10, to_12, to_13, 128, 128, sdpa_mask0), 0.125, {'BACKEND': 'AUTO', 'PRESCALE_QK': False, 'ROWS_GUARANTEED_SAFE': False, 'BLOCKS_ARE_CONTIGUOUS': False, 'WRITE_DQ': True, 'OUTPUT_LOGSUMEXP': False, 'OUTPUT_MAX': False}, (), (detach,)); view_1 = view_2 = view_3 = sdpa_score0 = to_3 = to_4 = to_6 = to_7 = to_9 = to_10 = to_12 = to_13 = sdpa_mask0 = detach = None
    getitem = flex_attention[0]
    getitem_1 = flex_attention[1]; getitem_1 = None
    getitem_2 = flex_attention[2]; flex_attention = getitem_2 = None
    return pytree.tree_unflatten((getitem,), self._out_spec)""",
    )

    exported_out = exported_mod(x)
    self.assertEqual(exported_out, eager_out)
def test_inductor_backend_inside_nonstrict(self):
    """torch.compile invoked inside a non-strict export region warns and
    is traced through (inlined into the graph) instead of compiled."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            def i_want_faster_code(inp1, inp2):
                nonlocal x
                return x + inp1 + inp2

            out = torch.compile(i_want_faster_code)(x, x)
            return x + out

    foo = Foo()
    with self.assertWarnsRegex(
        UserWarning, "You are calling torch.compile inside torch.export region"
    ):
        ep = export(foo, (torch.randn(4, 4),), strict=False).module()
    # The compiled closure is fully inlined: three plain aten.add nodes.
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %x : [num_users=4] = placeholder[target=x]
    %_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %x), kwargs = {})
    %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %x), kwargs = {})
    %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %add_1), kwargs = {})
    return (add_2,)""",
    )
def test_bincount(self):
    """aten.bincount with float weights survives export and matches eager.

    Cleanup: the original defined an `__init__` that only called
    `super().__init__()` (redundant) and bound the result to a temp
    before returning.
    """
    class M(torch.nn.Module):
        def forward(self, x):
            # Weights are built inside forward so export must trace
            # linspace as part of the graph.
            weights = torch.linspace(0, 1, steps=5)
            return x.bincount(weights)

    model = M()
    ep = export(model, (torch.randint(0, 8, (5,), dtype=torch.int64),))
    inp = torch.randint(0, 8, (5,), dtype=torch.int64)
    self.assertTrue(torch.allclose(ep.module()(inp), M()(inp)))
def test_symint_output(self):
    """A forward returning a SymInt alongside a tensor exports under
    fully dynamic input dims."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            z, y = x.size()
            return z + y + x[0], z

    d0, d1 = torch.export.dims("dim0_x", "dim1_x")
    export(Foo(), (torch.ones(2, 3),), dynamic_shapes={"x": (d0, d1)})
@testing.expectedFailureStrictV2
def test_no_tensor_computation(self):
    """A forward that ignores its tensor input and returns the int arg
    exports to a graph that keeps the (unused) placeholders and returns
    the constant directly."""
    class Module(torch.nn.Module):
        def forward(self, x, y):
            return y

    f = Module()
    inp = ([torch.ones(1, 3)], 1)
    ep = export(f, inp)
    self.assertEqual(ep.module()(*inp), f(*inp))
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %x_0 : [num_users=0] = placeholder[target=x_0]
    %y : [num_users=0] = placeholder[target=y]
    return (1,)""",
    )
def test_inline_script_function(self):
    """Export inlines the original Python of a torch.jit.script function,
    so the is_scripting() branch is dead and sin() is what gets traced."""
    @torch.jit.script
    def _forward(x: torch.Tensor):
        if torch.jit.is_scripting():
            return x.cos()
        return x.sin()

    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor):
            return _forward(x)

    inp = torch.randn(3, 4)
    ep = torch.export.export(M(), (inp,))
    graph_str = str(ep.graph)
    FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(graph_str)
    FileCheck().check_count("torch.ops.aten.cos", 0, exactly=True).run(graph_str)
    # We're inlining the original _forward function
    # instead of the scripted function, so we get x.sin()
    self.assertEqual(ep.module()(inp), inp.sin())
def test_nested_module_fake_tensor_leak(self):
    """Non-strict export must not leave fake tensors cached on the user's
    module after tracing completes."""
    class Bar(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self._tensor_cache = None

        def forward(self, x):
            if self._tensor_cache is None:
                self._tensor_cache = x + 2
            return self._tensor_cache.sum() + x.sum()

    class Foo(torch.nn.Module):
        def __init__(self, bar):
            super().__init__()
            self.bar = bar

        def forward(self, x):
            return self.bar(x)

    root = Foo(Bar())
    export(root, (torch.ones(4, 4),), strict=False)
    # The cache set during tracing held a fake tensor; export must reset
    # it rather than leak it back onto the eager module.
    self.assertTrue(root.bar._tensor_cache is None)
def test_export_leak_compile(self):
    """A module that reassigns `param.data` with a traced tensor
    (CacheModule's rolling cache) makes export capture a fake tensor as
    a constant; export must raise by default, or warn when
    error_on_lifted_constant_tensors is disabled."""
    class BaseModule(torch.nn.Module):
        def forward(self, *args, **kwargs):
            raise NotImplementedError

    class CacheModule(BaseModule):
        def __init__(self, cache: torch.Tensor):
            super().__init__()
            assert cache.ndim == 3
            self.cache = torch.nn.Parameter(cache, requires_grad=False)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            n_tokens = x.size(1)
            # Shift the cache left and write the new tokens at the end.
            # Reassigning .data during tracing is what leaks the fake
            # tensor into the module's state.
            rolled_cache = torch.roll(self.cache.data, -n_tokens, dims=1)
            rolled_cache[:, -n_tokens:, :] = x
            self.cache.data = rolled_cache
            return self.cache

    class LinearBlock(torch.nn.Module):
        def __init__(self, in_features, out_features, activation=None):
            super().__init__()
            self.linear = torch.nn.Linear(in_features, out_features)
            self.activation = activation

        def forward(self, x):
            x = self.linear(x)
            return self.activation(x) if self.activation else x

    class MyModel(BaseModule):
        def __init__(self):
            super().__init__()
            default_cache = torch.zeros(1, 10, 5)
            self.cache_layer = CacheModule(default_cache)
            self.fc1 = LinearBlock(5, 10, activation=torch.nn.ReLU())
            self.fc2 = LinearBlock(10, 5)

        def forward(self, x):
            cached = self.cache_layer(x)
            out = self.fc1(cached)
            out = self.fc2(out)
            return out

    with self.assertRaisesRegex(
        RuntimeError,
        "We found a fake tensor in the exported program constant's list. "
        "This typically means our tracing system encountered an op that we can't trace through. "
        "For the potential source, you can refer to following model attribute: cache_layer.lifted_tensor_0. "
        "Please file an issue on github.",
    ):
        _ = export(MyModel(), (torch.randn(1, 3, 5),), strict=False)

    with self.assertWarnsRegex(
        UserWarning,
        "We found a fake tensor in the exported program constant's list. "
        "This typically means our tracing system encountered an op that we can't trace through. "
        "For the potential source, you can refer to following model attribute: cache_layer.lifted_tensor_0. "
        "Please file an issue on github.",
    ):
        # can't trigger all variant of export because later on it will crash
        # and it is good because we warned :).
        with torch._export.config.patch(error_on_lifted_constant_tensors=False):
            _ = torch.export.export(
                MyModel(), (torch.randn(1, 3, 5),), strict=False
            )
def test_inline_script_class_method(self):
    """Same as test_inline_script_function, but the scripted function is
    a staticmethod on the module class; export still inlines the Python
    source, so sin() is traced."""
    class M(torch.nn.Module):
        @staticmethod
        @torch.jit.script
        def _forward(x: torch.Tensor):
            if torch.jit.is_scripting():
                return x.cos()
            return x.sin()

        def forward(self, x: torch.Tensor):
            return M._forward(x)

    inp = torch.randn(3, 4)
    ep = torch.export.export(M(), (inp,))
    graph_str = str(ep.graph)
    FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(graph_str)
    FileCheck().check_count("torch.ops.aten.cos", 0, exactly=True).run(graph_str)
    # We're inlining the original _forward function
    # instead of the scripted function, so we get x.sin()
    self.assertEqual(ep.module()(inp), inp.sin())
def test_tag_ac_export(self):
    # Exports a module wrapped in selective activation checkpointing (SAC)
    # and verifies that:
    #   1. the checkpointed region is captured under the
    #      tag_activation_checkpoint HOP with the expected outputs, and
    #   2. the save/recompute policy survives into the joint graph's
    #      node.meta["recompute"] entries.
    ops_to_save = [torch.ops.aten.addmm.default]

    def policy_fn(ctx, op, *args, **wargs):
        # Save only addmm results; everything else is recomputed in backward.
        if op in ops_to_save:
            return torch.utils.checkpoint.CheckpointPolicy.MUST_SAVE
        else:
            return torch.utils.checkpoint.CheckpointPolicy.PREFER_RECOMPUTE

    context_fn = functools.partial(
        torch.utils.checkpoint.create_selective_checkpoint_contexts, policy_fn
    )

    class Block(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear1 = torch.nn.Linear(128, 128)
            self.relu = torch.nn.ReLU()
            self.linear2 = torch.nn.Linear(128, 128)

        def forward(self, x):
            return self.linear2(self.relu(self.linear1(x)))

    # Wrap the block with checkpointing
    class CheckpointedBlock(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.block = Block()

        def forward(self, x):
            return torch.utils.checkpoint.checkpoint(
                self.block, x, context_fn=context_fn
            )

    model = CheckpointedBlock()
    x = torch.randn(16, 128, requires_grad=True)
    ep = torch.export.export(model, (x,), strict=True)
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %p_block_linear1_weight : [num_users=1] = placeholder[target=p_block_linear1_weight]
    %p_block_linear1_bias : [num_users=1] = placeholder[target=p_block_linear1_bias]
    %p_block_linear2_weight : [num_users=1] = placeholder[target=p_block_linear2_weight]
    %p_block_linear2_bias : [num_users=1] = placeholder[target=p_block_linear2_bias]
    %x : [num_users=1] = placeholder[target=x]
    %wrap_body0 : [num_users=1] = get_attr[target=wrap_body0]
    %tag_activation_checkpoint : [num_users=7] = call_function[target=torch.ops.higher_order.tag_activation_checkpoint](args = (%wrap_body0, %x, %p_block_linear1_weight, %p_block_linear1_bias, %p_block_linear2_weight, %p_block_linear2_bias), kwargs = {})
    %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 0), kwargs = {})
    %getitem_1 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 1), kwargs = {})
    %getitem_2 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 2), kwargs = {})
    %getitem_3 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 3), kwargs = {})
    %getitem_4 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 4), kwargs = {})
    %getitem_5 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 5), kwargs = {})
    %getitem_6 : [num_users=0] = call_function[target=operator.getitem](args = (%tag_activation_checkpoint, 6), kwargs = {})
    return (getitem,)""",
    )
    self.assertExpectedInline(
        str(ep.graph_module.wrap_body0.graph).strip(),
        """\
graph():
    %arg0_1 : [num_users=1] = placeholder[target=arg0_1]
    %arg1_1 : [num_users=2] = placeholder[target=arg1_1]
    %arg2_1 : [num_users=2] = placeholder[target=arg2_1]
    %arg3_1 : [num_users=2] = placeholder[target=arg3_1]
    %arg4_1 : [num_users=2] = placeholder[target=arg4_1]
    %linear : [num_users=2] = call_function[target=torch.ops.aten.linear.default](args = (%arg0_1, %arg1_1, %arg2_1), kwargs = {})
    %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%linear,), kwargs = {})
    %linear_1 : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%relu, %arg3_1, %arg4_1), kwargs = {})
    return (linear_1, arg1_1, arg2_1, linear, relu, arg3_1, arg4_1)""",
    )
    stack = contextlib.ExitStack()
    with stack:
        jwd = aot_export_joint_with_descriptors(stack, ep.module(), (x,))
        # Every node tagged by SAC should carry the policy decision computed
        # for its target op.
        for node in jwd.graph_module.graph.nodes:
            if "recompute" in node.meta:
                actual = node.meta["recompute"]
                expected = policy_fn(None, node.target, None, None)
                self.assertEqual(actual, expected)
        self.assertExpectedInline(
            str(jwd.graph_module.code).strip(),
            """\
def forward(self, primals, tangents):
    primals_1, primals_2, primals_3, primals_4, primals_5, tangents_1, = fx_pytree.tree_flatten_spec([primals, tangents], self._in_spec)
    t = torch.ops.aten.t.default(primals_1); primals_1 = None
    addmm = torch.ops.aten.addmm.default(primals_2, primals_5, t); primals_2 = None
    relu = torch.ops.aten.relu.default(addmm); addmm = None
    detach_3 = torch.ops.aten.detach.default(relu)
    t_1 = torch.ops.aten.t.default(primals_3); primals_3 = None
    addmm_1 = torch.ops.aten.addmm.default(primals_4, relu, t_1); primals_4 = None
    t_2 = torch.ops.aten.t.default(t_1); t_1 = None
    mm = torch.ops.aten.mm.default(tangents_1, t_2); t_2 = None
    t_3 = torch.ops.aten.t.default(tangents_1)
    mm_1 = torch.ops.aten.mm.default(t_3, relu); t_3 = relu = None
    t_4 = torch.ops.aten.t.default(mm_1); mm_1 = None
    sum_1 = torch.ops.aten.sum.dim_IntList(tangents_1, [0], True); tangents_1 = None
    view = torch.ops.aten.view.default(sum_1, [128]); sum_1 = None
    t_5 = torch.ops.aten.t.default(t_4); t_4 = None
    detach_6 = torch.ops.aten.detach.default(detach_3); detach_3 = None
    threshold_backward = torch.ops.aten.threshold_backward.default(mm, detach_6, 0); mm = detach_6 = None
    t_6 = torch.ops.aten.t.default(t); t = None
    mm_2 = torch.ops.aten.mm.default(threshold_backward, t_6); t_6 = None
    t_7 = torch.ops.aten.t.default(threshold_backward)
    mm_3 = torch.ops.aten.mm.default(t_7, primals_5); t_7 = primals_5 = None
    t_8 = torch.ops.aten.t.default(mm_3); mm_3 = None
    sum_2 = torch.ops.aten.sum.dim_IntList(threshold_backward, [0], True); threshold_backward = None
    view_1 = torch.ops.aten.view.default(sum_2, [128]); sum_2 = None
    t_9 = torch.ops.aten.t.default(t_8); t_8 = None
    return pytree.tree_unflatten([addmm_1, t_9, view_1, t_5, view, mm_2], self._out_spec)""",
        )
def test_inline_script_class_method_recursive(self):
    # Like test_inline_script_class_method, but the scripted staticmethod
    # itself calls another scripted free function; export should inline the
    # original Python source recursively (taking the eager, non-scripting
    # branch) and handle non-tensor values (float/int/str) captured from
    # the enclosing scope.
    f = 0.4
    i = 2
    s = "foo"

    @torch.jit.script
    def _inner(x: torch.Tensor, y: torch.Tensor, f: float, i: int, s_len: int):
        return x * y * f * i * s_len

    class M(torch.nn.Module):
        @staticmethod
        @torch.jit.script
        def _forward(x: torch.Tensor, y: torch.Tensor, f: float, i: int, s: str):
            if torch.jit.is_scripting():
                return _inner(x.cos(), y.cos(), f, i, len(s))
            return _inner(x.sin(), y.sin(), f, i, len(s))

        def forward(self, x: torch.Tensor):
            return M._forward(x, y=x, f=f, i=i, s=s)

    x = torch.randn(3, 4)
    ep = torch.export.export(M(), (x,))
    # sin appears twice (x.sin() and y.sin()); the scripting-only cos path
    # must be absent.
    FileCheck().check_count("torch.ops.aten.sin", 2, exactly=True).run(
        str(ep.graph)
    )
    FileCheck().check_count("torch.ops.aten.cos", 0, exactly=True).run(
        str(ep.graph)
    )
    res = ep.module()(x)
    # We're inlining the original _forward function
    # instead of the scripted function, so we get x.sin()
    self.assertEqual(res, _inner(x.sin(), x.sin(), f, i, len(s)))
def test_inline_script_method(self):
    # A torch.jit.ScriptModule's script_method should also be inlined from
    # its original Python source during export (eager branch taken), even
    # when the ScriptModule is nested inside a plain nn.Module wrapper.
    class M(torch.jit.ScriptModule):
        @torch.jit.script_method
        def _forward(self, x: torch.Tensor):
            if torch.jit.is_scripting():
                return x.cos()
            return x.sin()

        def forward(self, x):
            return self._forward(x)

    class Wrapped(torch.nn.Module):
        def __init__(self, mod):
            super().__init__()
            self.mod = mod

        def forward(self, x):
            return self.mod(x)

    x = torch.randn(3, 4)
    ep = torch.export.export(Wrapped(M()), (x,))
    # Eager-path sin only; no cos from the scripting branch.
    FileCheck().check_count("torch.ops.aten.sin", 1, exactly=True).run(
        str(ep.graph)
    )
    FileCheck().check_count("torch.ops.aten.cos", 0, exactly=True).run(
        str(ep.graph)
    )
    res = ep.module()(x)
    # We're inlining the original _forward function
    # instead of the scripted function, so we get x.sin()
    self.assertEqual(res, x.sin())
@testing.expectedFailureStrictV2
def test_no_tensor_computation_2(self):
    # A forward that returns one input unchanged and ignores the other
    # (non-tensor) input should still export; `y` stays as an unused
    # placeholder in the graph.
    class Module(torch.nn.Module):
        def forward(self, x, y):
            return x

    f = Module()
    inp = (torch.randn(3), 1)
    ep = export(f, inp)
    self.assertEqual(ep.module()(*inp), f(*inp))
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %x : [num_users=1] = placeholder[target=x]
    %y : [num_users=0] = placeholder[target=y]
    return (x,)""",
    )
@testing.expectedFailureStrictV2
def test_no_tensor_computation_3(self):
    # A forward with no tensor inputs at all that returns a Python constant
    # should export to a graph whose output is that baked-in constant and
    # whose placeholders are unused.
    class Module(torch.nn.Module):
        def forward(self, x, y):
            return 5

    f = Module()
    inp = (2, 1)
    ep = export(f, inp)
    self.assertEqual(ep.module()(*inp), f(*inp))
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %x : [num_users=0] = placeholder[target=x]
    %y : [num_users=0] = placeholder[target=y]
    return (5,)""",
    )
@testing.expectedFailureStrictV2
def test_no_tensor_computation_4(self):
    # Same identity-forward shape as test_no_tensor_computation_2, but the
    # tensor arrives inside a list; pytree flattening names the placeholder
    # `x_0` for the list's first element.
    class Module(torch.nn.Module):
        def forward(self, x, y):
            return x

    f = Module()
    inp = ([torch.randn(3)], 1)
    ep = export(f, inp)
    self.assertEqual(ep.module()(*inp), f(*inp))
    self.assertExpectedInline(
        str(ep.graph).strip(),
        """\
graph():
    %x_0 : [num_users=1] = placeholder[target=x_0]
    %y : [num_users=0] = placeholder[target=y]
    return (x_0,)""",
    )
def test_not_registered_parameter(self):
    # A Parameter stored in a plain dict attribute (not registered through
    # nn.Module machinery) must be exported as a lifted tensor constant,
    # not as a parameter.
    class Basic(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.params = {"foo": torch.nn.Parameter(torch.ones(3, 3))}

        def forward(self, x):
            return x + self.params["foo"]

    f = Basic()
    args = (torch.randn(1, 3),)
    # strict-mode will error out because foo is registered as parameter
    # in dynamo (a behavior that's different from eager). We decided to
    # follow eager behavior.
    ep = export(f, args, strict=False)
    gm = ep.module()
    # One lifted constant, zero parameters in the signature.
    self.assertEqual(len(ep.graph_signature.lifted_tensor_constants), 1)
    self.assertEqual(len(ep.graph_signature.parameters), 0)
    # check foo is not a parameter in the final graph
    self.assertEqual(len(list(gm.named_parameters())), 0)
    self.assertEqual(gm(*args), f(*args))
    self.assertExpectedInline(
        str(gm.graph).strip(),
        """\
graph():
    %lifted_tensor_0 : [num_users=1] = get_attr[target=lifted_tensor_0]
    %x : [num_users=2] = placeholder[target=x]
    %_guards_fn : [num_users=0] = call_module[target=_guards_fn](args = (%x,), kwargs = {})
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, %lifted_tensor_0), kwargs = {})
    return (add,)""",
    )
def test_int_shape_specialization(self):
    # Calling int() on symbolic sizes specializes them to constants, so
    # exporting with DYNAMIC dims must fail with a specialization error
    # that mentions the specialized constant (28) once per dim.
    # Fix: removed unused locals `input2` and `inputs` — only `input1` was
    # ever exported.
    class M(torch.nn.Module):
        def forward(self, x):
            # int(...) forces the symbolic sizes to concrete Python ints.
            ori_size = (
                int(x.shape[-2] / 1),
                int(x.shape[-1] / 1),
            )
            x = F.interpolate(x, size=ori_size, mode="bilinear")
            return x

    input1 = (torch.rand(1, 3, 28, 28),)
    model = M()
    dynamic_shapes = {
        "x": {2: torch.export.Dim.DYNAMIC, 3: torch.export.Dim.DYNAMIC},
    }
    with self.assertRaisesRegex(
        (
            torch.fx.experimental.symbolic_shapes.ConstraintViolationError,
            torch._dynamo.exc.UserError,
        ),
        (
            r"your code specialized it to be a constant \(28\)(.*\n)*.*"
            r"your code specialized it to be a constant \(28\)(.*\n)*.*"
        ),
    ):
        export(model, input1, dynamic_shapes=dynamic_shapes, strict=False)
def test_external_call_non_strict_real_tensor(self):
    """Non-strict export should trace through a bound method of a plain
    (non-Module) Python object referenced from the module."""

    class ExternalMethod:
        def add(self, x):
            return x + x

    class Basic(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.external_add = ExternalMethod().add

        def forward(self, x):
            return self.external_add(x)

    model = Basic()
    example_inputs = (torch.randn(1, 3),)
    exported = export(model, example_inputs, strict=False)
    self.assertEqual(exported.module()(*example_inputs), model(*example_inputs))
def test_export_statically_known_true(self):
    """Exporting a slice whose end point is a symbolic expression of another
    input's size should keep a single slice op plus the `sub` computing the
    end index."""

    class Foo(torch.nn.Module):
        def forward(self, x, y):
            end = y.shape[0] ** 2 - 3 * y.shape[0]
            return x[:, :end]

    dyn = torch.export.Dim.DYNAMIC
    dynamic_shapes = ((dyn, dyn), (dyn, dyn))
    model = Foo()
    sample = (torch.randn(4, 4), torch.randn(4, 4))
    ep = export(model, sample, dynamic_shapes=dynamic_shapes, strict=False)
    self.assertTrue(torch.allclose(ep.module()(*sample), model(*sample)))
    FileCheck().check_count("torch.ops.aten.slice.Tensor", 1, exactly=True).run(
        str(ep.graph)
    )
    FileCheck().check_count("operator.sub", 1, exactly=True).run(str(ep.graph))
def test_colon_parameter(self):
    """A parameter whose registered name contains a colon must survive
    export round-tripping."""

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.register_parameter("foo:bar", torch.nn.Parameter(torch.ones(3, 3)))

        def forward(self, x):
            return x + getattr(self, "foo:bar")

    ep = export(M(), (torch.randn(3, 3),))
    sample = torch.randn(3, 3)
    self.assertEqual(ep.module()(sample), M()(sample))
def test_conv_dynamic(self):
    """Export a conv/relu/maxpool module with a dynamic batch dimension and
    check numerical parity at batch sizes other than the example's."""

    # Simple module for demonstration
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(
                in_channels=3, out_channels=32, kernel_size=3, padding=1
            )
            self.relu = torch.nn.ReLU()
            self.maxpool = torch.nn.MaxPool2d(kernel_size=3)

        def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            a = self.conv(x)
            a.add_(y)
            return self.maxpool(self.relu(a))

    example_args = (torch.randn(2, 3, 256, 256), torch.ones(2, 32, 256, 256))
    dynamic_shapes = {"x": {0: Dim("batch")}, "y": {0: Dim("batch")}}
    model = M()

    exported_program: torch.export.ExportedProgram = export(
        model, args=example_args, dynamic_shapes=dynamic_shapes
    )
    # Run the exported program at two batch sizes distinct from batch=2.
    for batch in (17, 15):
        inputs = (
            torch.randn(batch, 3, 256, 256),
            torch.ones(batch, 32, 256, 256),
        )
        self.assertEqual(exported_program.module()(*inputs), model(*inputs))

    # Same check again via the unlifted GraphModule.
    gm: torch.fx.GraphModule = torch.export.export(
        model, args=example_args, dynamic_shapes=dynamic_shapes
    ).module()
    for batch in (17, 15):
        inputs = (
            torch.randn(batch, 3, 256, 256),
            torch.ones(batch, 32, 256, 256),
        )
        self.assertEqual(gm(*inputs), model(*inputs))
# stride() is called for an undefined tensor
@testing.expectedFailureCppRuntimeNonStrict
def test_native_multi_attention_head(self):
    # Exports torch._native_multi_head_attention with dynamic dim-0 on
    # q/k/v and a None key_padding_mask, then checks numerical parity with
    # eager for every (mask_type, need_weights, average_attn_weights)
    # combination.
    embed_dim = 64
    num_heads = 4
    bs = 16
    sl = 8
    device = "cpu"
    # Inputs in roughly [-3, 3); q, k and v alias the same tensor
    # (self-attention).
    q = 6 * torch.rand(bs, sl, embed_dim, device=device, dtype=torch.float32) - 3
    k = q
    v = q
    # Shared projection weights used by every NativeMHA instance below.
    qkv = torch.nn.Linear(
        embed_dim, 3 * embed_dim, device=device, dtype=torch.float32
    )
    proj = torch.nn.Linear(embed_dim, embed_dim, device=device, dtype=torch.float32)

    class NativeMHA(torch.nn.Module):
        def __init__(
            self,
            embed_dim,
            num_heads,
            qkv,
            proj,
            need_weights,
            average_attn_weights,
            mask_type,
        ):
            super().__init__()
            self.qkv = qkv
            self.proj = proj
            self.embed_dim = embed_dim
            self.num_heads = num_heads
            self.need_weights = need_weights
            self.average_attn_weights = average_attn_weights
            self.mask_type = mask_type

        def forward(self, q, k, v, key_padding_mask):
            # NOTE(review): self.need_weights, self.average_attn_weights and
            # self.mask_type are stored above but the call below hard-codes
            # need_weights=False, average_attn_weights=False and mask_type=1,
            # so all loop variants export the same graph — presumably
            # intentional, but confirm.
            return torch._native_multi_head_attention(
                q,
                k,
                v,
                self.embed_dim,
                self.num_heads,
                self.qkv.weight,
                self.qkv.bias,
                self.proj.weight,
                self.proj.bias,
                key_padding_mask,
                need_weights=False,
                average_attn_weights=False,
                mask_type=1,  # mask_type = 1 => src_key_padding_mask, mask_type = 0 => src_mask
            )

    for mask_type in (0, 1):
        for need_weights in (True, False):
            for average_attn_weights in (True, False):
                npt = NativeMHA(
                    embed_dim=embed_dim,
                    num_heads=num_heads,
                    qkv=qkv,
                    proj=proj,
                    need_weights=need_weights,
                    average_attn_weights=average_attn_weights,
                    mask_type=mask_type,
                )
                sample_input = (q, k, v, None)
                ep = export(
                    npt,
                    args=sample_input,
                    dynamic_shapes={
                        "q": {
                            0: Dim("dim0_q", max=1024),
                        },
                        "k": {
                            0: Dim("dim0_k", max=1024),
                        },
                        "v": {
                            0: Dim("dim0_v", max=1024),
                        },
                        "key_padding_mask": None,
                    },
                )
                self.assertEqual(ep.module()(*sample_input), npt(*sample_input))
def test_unused_constant(self):
    """Tensor constants created in forward but never used in the output must
    not be lifted into ``ep.constants``."""

    class M(torch.nn.Module):
        def forward(self, x):
            y = torch.tensor(3)
            return x * x

    exported = export(M(), (torch.ones(3),))
    self.assertEqual(len(exported.constants), 0)

    # Second variant: placeholder empty tensors are created and immediately
    # overwritten — they must not be lifted either, and unflattening must
    # preserve the output.
    class M(torch.nn.Module):
        def __init__(self, num_features: int = 1) -> None:
            super().__init__()
            self.num_features = num_features

        def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
            res = [torch.Tensor([])] * self.num_features
            for i in range(self.num_features):
                res[i] = x * (i + 1)
            return res

    sample = torch.ones(3)
    exported = export(M(), (sample,))
    self.assertEqual(len(exported.constants), 0)
    unflattened = unflatten(exported)
    self.assertTrue(torch.allclose(M()(sample)[0], unflattened(sample)[0]))
def test_unbacked_bincount(self):
    """bincount with unbacked sizes (both derived from .tolist()) should
    export and match eager at more than one input value."""

    class Foo(torch.nn.Module):
        def forward(self, xs):
            u0, u1 = xs.tolist()
            x = torch.ones(u0, dtype=torch.int64)
            y = torch.bincount(x, minlength=u1)
            return y

    model = Foo()
    first = torch.tensor([20, 10])
    ep = export(model, (first,))
    for sample in (first, torch.tensor([5, 10])):
        self.assertTrue(torch.allclose(ep.module()(sample), model(sample)))
@requires_gpu
def test_export_custom_triton_kernel(self):
    # Exports a model calling a torch.library.triton_op-backed custom op and
    # checks that (a) the op is preserved through export and through
    # run_decompositions(decompose_custom_triton_ops=False), and (b) it is
    # lowered to the functional triton HOP when decomposition is requested.
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)

    @torch.library.triton_op("mylib::add", mutates_args=())
    def custom_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        output = torch.empty_like(x)
        n_elements = output.numel()

        def grid(meta):
            return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

        capture_triton(add_kernel)[grid](x, y, output, n_elements, 16)
        return output

    class M(torch.nn.Module):
        def forward(self, x, y):
            return custom_add(x, y)

    args = (
        torch.randn(3, device=GPU_TYPE),
        torch.randn(3, device=GPU_TYPE),
    )
    max_len = 128
    dynamic_shapes = {
        "x": {0: Dim("dim0_x", max=max_len)},
        "y": {0: Dim("dim0_y", max=max_len)},
    }
    m = M()
    ep = export(m, args, dynamic_shapes=dynamic_shapes)
    FileCheck().check_count("torch.ops.mylib.add", 1, exactly=True).run(
        ep.graph_module.code
    )
    ep_decomposed = ep.run_decompositions(decompose_custom_triton_ops=False)
    # Fix: inspect the decomposed program, not `ep` again — previously this
    # re-checked `ep.graph_module.code`, which made the assertion vacuous.
    FileCheck().check_count("torch.ops.mylib.add", 1, exactly=True).run(
        ep_decomposed.graph_module.code
    )
    ep_decomposed = ep.run_decompositions(decompose_custom_triton_ops=True)
    FileCheck().check_count(
        "torch.ops.higher_order.triton_kernel_wrapper_functional", 1, exactly=True
    ).run(ep_decomposed.graph_module.code)
    exp_out = m(*args)
    self.assertEqual(exp_out, ep.module()(*args))
@requires_gpu
def test_export_custom_triton_kernel_mutable(self):
    # Same as test_export_custom_triton_kernel, but the custom op mutates
    # its `output` argument in place; export must functionalize the
    # mutation (auto_functionalized / triton_kernel_wrapper_functional)
    # while the non-decomposed module keeps the eager mutation semantics.
    @triton.jit
    def add_kernel(
        in_ptr0,
        in_ptr1,
        out_ptr,
        n_elements,
        BLOCK_SIZE: "tl.constexpr",
    ):
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(in_ptr0 + offsets, mask=mask)
        y = tl.load(in_ptr1 + offsets, mask=mask)
        output = x + y
        tl.store(out_ptr + offsets, output, mask=mask)

    @torch.library.triton_op("mylib::add", mutates_args={"output"})
    def custom_add_out(
        x: torch.Tensor, y: torch.Tensor, output: torch.Tensor
    ) -> torch.Tensor:
        n_elements = output.numel()

        def grid(meta):
            return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

        # Writes x + y into `output` in place, then returns a clone.
        capture_triton(add_kernel)[grid](x, y, output, n_elements, 16)
        return output.clone()

    class M(torch.nn.Module):
        def forward(self, x, y, out):
            return custom_add_out(x, y, out)

    args = (
        torch.randn(3, device=GPU_TYPE),
        torch.randn(3, device=GPU_TYPE),
        torch.zeros(3, device=GPU_TYPE),
    )
    # NOTE(review): eager warm-up call before export; presumably compiles /
    # registers the kernel up front — confirm it is required.
    custom_add_out(*args)
    max_len = 128
    dynamic_shapes = {
        "x": {0: Dim("dim0_x", max=max_len)},
        "y": {0: Dim("dim0_y", max=max_len)},
        "out": {0: Dim("dim0_z", max=max_len)},
    }
    m = M()
    ep = export(m, args, dynamic_shapes=dynamic_shapes)
    FileCheck().check_count("torch.ops.mylib.add", 1, exactly=True).run(
        ep.graph_module.code
    )
    ep_decomposed = ep.run_decompositions(decompose_custom_triton_ops=False)
    FileCheck().check_count(
        "torch.ops.higher_order.auto_functionalized", 1, exactly=True
    ).run(ep_decomposed.graph_module.code)
    ep_decomposed = ep.run_decompositions(decompose_custom_triton_ops=True)
    if is_training_ir_test(self._testMethodName):
        # TODO: For training IR test, we functionalize the custom triton op with auto_functionalized.
        # The custom op's functional decomposition is not triggered as a result. It might be better to
        # decompose the custom triton ops. Users can workaround by unwrapping auto_functionalized
        # in order to get the functional triton hop if needed.
        FileCheck().check_count(
            "torch.ops.higher_order.auto_functionalized", 1, exactly=True
        ).run(ep_decomposed.graph_module.code)
    else:
        FileCheck().check_count(
            "torch.ops.higher_order.triton_kernel_wrapper_functional",
            1,
            exactly=True,
        ).run(ep_decomposed.graph_module.code)
    x, y, out = (
        torch.randn(3, device=GPU_TYPE),
        torch.randn(3, device=GPU_TYPE),
        torch.zeros(3, device=GPU_TYPE),
    )
    exp_out = m(x, y, out)  # eager call also mutates `out` in place
    out_copy = out.clone()
    out_copy2 = out.clone()
    out_copy3 = out.clone()
    self.assertEqual(exp_out, ep.module()(x, y, out_copy))
    # For non-functional graph module, out_copy is mutated
    self.assertEqual(out, out_copy)
    self.assertEqual(exp_out, ep_decomposed.module()(x, y, out_copy2))
    # For the functionalized (decomposed) module, out_copy2 is NOT mutated,
    # so it still equals the untouched out_copy3.
    self.assertEqual(out_copy2, out_copy3)
def test_masked_select_dynamic(self):
    """masked_select produces an unbacked output size; its range should be
    bounded by the product of the (max) input dims."""

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            mask = x.ge(0.5)
            return torch.masked_select(x, mask)

    dim0_max, dim1_max = 100, 7
    dynamic_shapes = {
        "x": {
            0: Dim("dim0_x", max=dim0_max),
            1: Dim("dim1_x_max", max=dim1_max),
        }
    }
    exported_program: torch.export.ExportedProgram = export(
        M(), args=(torch.randn(3, 4, 5),), dynamic_shapes=dynamic_shapes
    )
    # The expected upper bound (max dim0 * max dim1 * static dim2) must
    # appear among the range constraints...
    bound = dim0_max * dim1_max * 5
    uppers = [vr.upper for vr in exported_program.range_constraints.values()]
    self.assertTrue(bound in set(uppers))
    # ...and no constraint may exceed it.
    for upper in uppers:
        self.assertTrue(upper <= bound)
def test_nonzero_dynamic(self):
    # torch.nonzero has a data-dependent output size. Check that the range
    # constraints on the unbacked output dim are bounded by numel() of the
    # input (cases 1/2), and collapse to exactly 1 for a nonzero 0-d input
    # (case 3).
    # Fix: removed an unused `dim0_x_max = 100` reassignment before case 3.
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x: torch.Tensor, as_tuple: bool) -> torch.Tensor:
            return torch.nonzero(x, as_tuple=as_tuple)

    # Case 1 and 2: as_tuple is True and as_tuple is False.
    for as_tuple in [True, False]:
        example_args = (torch.randn(3, 4, 5), as_tuple)
        dim0_x_max, dim1_x_max = 100, 7
        dynamic_shapes = {
            "x": {
                0: Dim("dim0_x", max=dim0_x_max),
                1: Dim("dim1_x_max", max=dim1_x_max),
            },
            "as_tuple": None,
        }
        m = M()
        exported_program: torch.export.ExportedProgram = export(
            m, args=example_args, dynamic_shapes=dynamic_shapes
        )
        # Test that the expected upper bound is among the range constraints.
        expected_upper_bound = dim0_x_max * dim1_x_max * 5
        vr_upper_bounds = [
            vr.upper for vr in exported_program.range_constraints.values()
        ]
        self.assertTrue(expected_upper_bound in set(vr_upper_bounds))
        # Test that none of the upper bounds are larger.
        for vr_upper in vr_upper_bounds:
            self.assertTrue(vr_upper <= expected_upper_bound)

        # Case 3: Test special case when input has zero dimensions and a nonzero
        # scalar value.
        example_args = (torch.tensor(10), as_tuple)
        dynamic_shapes = {
            "x": None,
            "as_tuple": None,
        }
        m = M()
        exported_program: torch.export.ExportedProgram = export(
            m, args=example_args, dynamic_shapes=dynamic_shapes
        )
        # Test that the expected upper bound is equal to 1, since our output
        # for this edge case should always be a tensor of size 1.
        vr_upper_bounds = [
            vr.upper for vr in exported_program.range_constraints.values()
        ]
        for vr_upper in vr_upper_bounds:
            self.assertEqual(vr_upper, 1)
@testing.expectedFailureStrictV2
def test_detect_leak_strict(self):
    # Strict export should warn when the traced forward's closure keeps
    # references to outside state (self.bank, self.bank_dict, global_list)
    # into which clones of the inputs escape ("leak") during tracing.
    class Foo(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, y):
            return x + y

    global_list = []

    class ReferenceControl:
        def __init__(self, mod):
            self.bank = []
            self.bank_dict = {}
            self.mod = mod

            # Replace the module's forward with a closure that stashes
            # clones of the input in instance and global containers.
            def hacked_up_forward(self_, x, y):
                self.bank.append(x.clone())
                self.bank_dict["x"] = x.clone()
                global_list.append(x.clone())
                return x + y

            # Bind the closure as the instance's forward method.
            self.mod.forward = hacked_up_forward.__get__(self.mod, Foo)

        def __call__(self, x, y):
            ep = export(self.mod, (x, y), strict=True).module()
            out = ep(x, y)
            return out

        def update(self):
            print(self.bank)

    foo = Foo()
    ref = ReferenceControl(foo)
    # TODO (tmanlaibaatar) this kinda sucks but today there is no good way to get
    # good source name. We should have an util that post processes dynamo source names
    # to be more readable.
    with self.assertWarnsRegex(
        UserWarning,
        r"(L\['self']\._modules\['_export_root']\.forward\.__func__\.__closure__\[1\]\.cell_contents\.bank"
        r"|L\['self']\._modules\['_export_root']\.forward\.__func__\.__closure__\[1\]\.cell_contents\.bank_dict"
        r"|L\['self']\._modules\['_export_root']\.forward\.__func__\.__closure__\[0\]\.cell_contents)",
    ):
        ref(torch.randn(4, 4), torch.randn(4, 4))
def test_mask_nonzero_static(self):
    """nonzero_static sized from another input's (backed) shape avoids
    unbacked symints; verify parity across two batch sizes."""

    class TestModule(torch.nn.Module):
        def forward(self, seq_embeddings, mask, exp):
            # Instead of `output = seq_embeddings[mask]`` which makes
            # output.shape have unbacked symint, encode side knowledge of
            # output.shape as exp.shape to force it to have backed symint
            index = torch.nonzero_static(mask, size=exp.shape[0])
            chunked_index = index.chunk(chunks=mask.dim(), dim=1)
            output = seq_embeddings[chunked_index].squeeze()
            final_output = output * 2
            return final_output

    model = TestModule()

    def make_inputs(rows):
        return (
            torch.randn(rows, 5),
            torch.ones(rows, 5, dtype=torch.bool),
            torch.randn(rows * 5),
        )

    example = make_inputs(5)
    batch = torch.export.Dim("batch")
    exp_size = torch.export.Dim("exp_size", max=100)
    ep = export(
        model,
        example,
        dynamic_shapes={
            "seq_embeddings": (batch, None),
            "mask": (batch, None),
            "exp": (exp_size,),
        },
    )
    for rows in (5, 6):
        inputs = make_inputs(rows)
        self.assertTrue(torch.allclose(model(*inputs), ep.module()(*inputs)))
def test_setgrad_lifted_tensor(self):
    """A tensor constant created under torch.enable_grad() inside forward
    should export correctly even when export itself runs under no_grad."""

    class M(torch.nn.Module):
        def forward(self, x, y):
            with torch.enable_grad():
                c = torch.tensor(4)
                z = c + x + y
            return z * z

    model = M()
    lhs = torch.randn(4)
    rhs = torch.randn(4)
    # Need to surround export with no_grad to bypass AutogradStateOpsFailSafeguard.
    with torch.no_grad():
        ep = export(model, (lhs, rhs))
    self.assertEqual(ep.module()(lhs, rhs), model(lhs, rhs))
def test_subclass_context(self):
    # Export with a doubly nested TwoTensor subclass input, using
    # ShapesCollection to mark dim 0 STATIC and dim 1 AUTO; the guard on
    # the static dim must then reject an input of a different size.
    class Foo(torch.nn.Module):
        def forward(self, x):
            return x + 1

    input = TwoTensor(
        TwoTensor(torch.randn(4, 4), torch.rand(4, 4)),
        TwoTensor(torch.randn(4, 4), torch.rand(4, 4)),
    )
    # Same structure but 6x6 — violates the size-4 guard on dim 0.
    input_test = TwoTensor(
        TwoTensor(torch.randn(6, 6), torch.rand(6, 6)),
        TwoTensor(torch.randn(6, 6), torch.rand(6, 6)),
    )
    for strict in [True, False]:
        dim = torch.export.ShapesCollection()
        dim[input] = [Dim.STATIC, Dim.AUTO]
        ep = torch.export.export(Foo(), (input,), strict=strict, dynamic_shapes=dim)
        self.assertExpectedInline(
            str(ep.graph).strip(),
            """\
graph():
    %x : [num_users=1] = placeholder[target=x]
    %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%x, 1), kwargs = {})
    return (add,)""",
        )
        with self.assertRaisesRegex(
            AssertionError, escape("Guard failed: x.size()[0] == 4")
        ):
            ep.module()(input_test)
def test_basic_non_strict_real_tensor(self):
    """Smoke test: non-strict export of a module that indexes into a list
    input and uses a registered parameter."""

    class Basic(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.randn(1, 3))

        def forward(self, x, y):
            return x[0] + y - self.param

    model = Basic()
    example_args = ([torch.randn(1, 3)], torch.randn(1, 3))
    ep = export(model, example_args, strict=False)
    self.assertEqual(ep.module()(*example_args), model(*example_args))
def test_where_decomp(self):
    """aten.where.default called with only a condition must survive
    run_decompositions when every dim is marked Dim.AUTO."""

    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x):
            return torch.ops.aten.where.default(x > 0)

    def auto_dynamic_shapes_from_args(args):  # pyre-ignore
        """
        This function creates dynamic shapes specification with Dim.AUTO
        in all dimensions of all tensors for given argument list.
        """
        if isinstance(args, torch.Tensor):
            return {axis: Dim.AUTO for axis in range(args.dim())}
        if isinstance(args, list):
            return [auto_dynamic_shapes_from_args(item) for item in args]
        if isinstance(args, tuple):
            return tuple(auto_dynamic_shapes_from_args(item) for item in args)
        if isinstance(args, dict):
            return {k: auto_dynamic_shapes_from_args(v) for k, v in args.items()}
        print(f"args type: {type(args)}")
        return None

    sample_input = (torch.randn(2, 3),)
    ep = torch.export.export(
        TestModule(),
        sample_input,
        dynamic_shapes=auto_dynamic_shapes_from_args(sample_input),
    ).run_decompositions({})
def test_basic_non_strict_fake_tensor(self):
    """Non-strict export with fake-tensor example inputs; real tensors are
    then run through the exported module and compared against eager."""

    class Basic(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.param = torch.nn.Parameter(torch.randn(3, 2))

        def forward(self, x, y):
            return x[0] + y - self.param

    fake_mode = FakeTensorMode(shape_env=ShapeEnv(tracked_fakes=[]))
    model = Basic()
    # Only the example inputs are fake; the module's parameters are real.
    with fake_mode:
        fake_args = ([torch.empty(3, 2)], torch.empty(3, 2))
    ep = export(model, fake_args, strict=False)
    real_inputs = ([torch.randn(3, 2)], torch.randn(3, 2))
    self.assertEqual(ep.module()(*real_inputs), model(*real_inputs))
def test_non_strict_dynamic_shapes(self):
    # Non-strict export with one shared symbolic dim across a tensor, a
    # list, and a dict of tensors (c stays static). Verifies the equality
    # guards at runtime, the static guard on c, a good run at a new size,
    # and the min-range violation at export time.
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.u = torch.nn.Buffer(torch.ones(1))
            self.v = torch.nn.Buffer(torch.ones(1))

        def forward(self, x, ys, zs, c):
            y = ys[0] + ys[1] + zs["a"] + zs["b"]
            # In-place buffer mutation during forward.
            self.v.add_(3)
            w = self.u - self.v
            # Branch depends on x's (symbolic) dim and c's (static) dim.
            if x.shape[0] < 3 and c.shape[0] != 4:
                return x + w, x + y
            else:
                return x - w, x - y

    foo = Foo()
    inp = (
        torch.ones(5),
        [torch.zeros(5), torch.ones(5)],
        {"a": torch.zeros(5), "b": torch.ones(5)},
        torch.ones(4),
    )
    dim = torch.export.Dim("dim", min=3)
    # One shared dim for x, ys[*] and zs[*]; c left static (None).
    dynamic_shapes = (
        {0: dim},
        [{0: dim}, {0: dim}],
        {"a": {0: dim}, "b": {0: dim}},
        None,
    )
    ep_ns = torch.export.export(
        foo, inp, dynamic_shapes=dynamic_shapes, strict=False
    )
    # x has size 6 but ys still size 5 — violates the shared-dim guard.
    bad_runtime_inp1 = (
        torch.ones(6),
        [torch.zeros(5), torch.ones(5)],
        {"a": torch.zeros(5), "b": torch.ones(5)},
        torch.ones(4),
    )
    with self.assertRaisesRegex(
        AssertionError,
        escape("Guard failed: ys[0].size()[0] == x.size()[0]"),
    ):
        # expected 6, but got 5
        ep_ns.module()(*bad_runtime_inp1)
    # c was exported as static size 4; size 6 violates its guard.
    bad_runtime_inp2 = (
        torch.ones(5),
        [torch.zeros(5), torch.ones(5)],
        {"a": torch.zeros(5), "b": torch.ones(5)},
        torch.ones(6),
    )
    with self.assertRaisesRegex(
        AssertionError,
        escape("Guard failed: c.size()[0] == 4"),
    ):
        # expected 4, but got 6
        ep_ns.module()(*bad_runtime_inp2)
    # A consistent new size (7) for the shared dim runs fine.
    good_runtime_inp = (
        torch.ones(7),
        [torch.zeros(7), torch.ones(7)],
        {"a": torch.zeros(7), "b": torch.ones(7)},
        torch.ones(4),
    )
    ep_ns.module()(*good_runtime_inp)
    # Example inputs of size 2 violate dim's min=3 at export time.
    bad_example_inp = (
        torch.ones(2),
        [torch.zeros(2), torch.ones(2)],
        {"a": torch.zeros(2), "b": torch.ones(2)},
        torch.ones(4),
    )
    with self.assertRaisesRegex(
        torch.fx.experimental.symbolic_shapes.ConstraintViolationError,
        "2 not in range.*3,",
    ):
        ep_ns = torch.export.export(
            foo, bad_example_inp, dynamic_shapes=dynamic_shapes, strict=False
        )
def test_non_strict_dynamic_shapes_suggested_fixes(self):
    """A dim constrained by a data-dependent branch should fail non-strict
    export with a suggested-fixes message naming the tightened range."""

    class Foo(torch.nn.Module):
        def forward(self, x, c):
            if x.shape[0] <= 6:
                return x + 1, c + 2
            else:
                return x - 1, c - 2

    model = Foo()
    bad_example_inp = (
        torch.ones(5),
        torch.ones(4),
    )
    # min=3 with no max; the branch above implies max=6, which export must
    # surface as a suggested fix.
    dynamic_shapes = ({0: torch.export.Dim("dim", min=3)}, None)
    with self.assertRaisesRegex(
        torch._dynamo.exc.UserError,
        "Constraints violated \\(dim\\)!(.*\n)*.*"
        "Not all values of dim.*satisfy the generated guard(.*\n)*.*"
        "Suggested fixes:(.*\n)*.*"
        "dim = Dim\\('dim', min=3, max=6\\)",
    ):
        torch.export.export(
            model, bad_example_inp, dynamic_shapes=dynamic_shapes, strict=False
        )
def test_symint_item(self):
    """.item() on a one-element int tensor exports and matches eager."""

    class M(torch.nn.Module):
        def forward(self, tensor):
            return tensor.item()

    sample = (torch.tensor([1], dtype=torch.int),)
    expected = M()(*sample)
    actual = torch.export.export(M(), sample).module()(*sample)
    self.assertEqual(expected, actual)
def test_symbool_item(self):
    """.item() on a one-element bool tensor exports and matches eager."""

    class M(torch.nn.Module):
        def forward(self, tensor):
            return tensor.item()

    sample = (torch.tensor([1], dtype=torch.bool),)
    expected = M()(*sample)
    actual = torch.export.export(M(), sample).module()(*sample)
    self.assertEqual(expected, actual)
def test_symfloat_item(self):
    """.item() on a one-element float tensor exports and matches eager."""

    class M(torch.nn.Module):
        def forward(self, tensor):
            return tensor.item()

    sample = (torch.tensor([3.14], dtype=torch.float),)
    expected = M()(*sample)
    actual = torch.export.export(M(), sample).module()(*sample)
    self.assertEqual(expected, actual)
def test_unbacked_to_cond(self):
strict = True
class M(torch.nn.Module):
def forward(self, a):
az = a.nonzero()
def true_fn(x):
return (x + 1).sum()
def false_fn(x):
return (x + 3).sum()
r = torch.cond(az.size(0) > 3, true_fn, false_fn, (az,))
return r * 2
M()(torch.randn(7))
torch.export.export(M(), (torch.randn(7),), strict=strict)
def test_unbacked_to_cond_passthrough(self):
strict = True
class M(torch.nn.Module):
def forward(self, a):
az = a.nonzero()
def true_fn(x):
return x + 1
def false_fn(x):
return x + 3
r = torch.cond(az.size(0) > 3, true_fn, false_fn, (az,))
return r * 2
M()(torch.randn(7))
torch.export.export(M(), (torch.randn(7),), strict=strict)
def test_cond_branches_return_constant_int(self):
if "cpp_runtime_nonstrict" in self.id():
self.skipTest("TODO Unexpected success in OSS but not in fbcode.")
class M(torch.nn.Module):
def forward(self, x):
idx = torch.cond(x.sum() > 3, lambda: 0, lambda: 1, tuple())
return x[idx]
args = (torch.randn(3, 3),)
m = M()
ep = export(M(), args)
if self._testMethodName == "test_cond_branches_return_constant_int":
self.assertExpectedInline(
normalize_gm(ep.module().print_readable(print_output=False)),
"""\
| TestExport |
python | mlflow__mlflow | mlflow/data/dataset.py | {
"start": 175,
"end": 4237
} | class ____:
"""
Represents a dataset for use with MLflow Tracking, including the name, digest (hash),
schema, and profile of the dataset as well as source information (e.g. the S3 bucket or
managed Delta table from which the dataset was derived). Most datasets expose features
and targets for training and evaluation as well.
"""
def __init__(self, source: DatasetSource, name: str | None = None, digest: str | None = None):
    """
    Base constructor for a dataset. All subclasses must call this constructor.

    Args:
        source: The dataset's source (e.g. an S3 location or table).
        name: Optional dataset name; the ``name`` property falls back to
            ``"dataset"`` when this is None.
        digest: Optional precomputed digest; when falsy, computed via
            ``_compute_digest()``.
    """
    self._name = name
    self._source = source
    # Note: Subclasses should call super() once they've initialized all of
    # the class attributes necessary for digest computation
    self._digest = digest or self._compute_digest()
# Abstract hook: invoked from __init__ when the caller supplies no digest.
@abstractmethod
def _compute_digest(self) -> str:
    """Computes a digest for the dataset. Called if the user doesn't supply
    a digest when constructing the dataset.

    Returns:
        A string digest for the dataset. We recommend a maximum digest length
        of 10 characters with an ideal length of 8 characters.
    """
def to_dict(self) -> dict[str, str]:
    """Serialize the dataset into a flat config dictionary.

    Subclasses may override to append extra fields (schema, profile, ...).
    The base representation always carries: ``name``, ``digest``,
    ``source`` (JSON-encoded), and ``source_type``.
    """
    src = self.source
    return {
        "name": self.name,
        "digest": self.digest,
        "source": src.to_json(),
        "source_type": src._get_source_type(),
    }
def to_json(self) -> str:
    """
    Serialize this :py:class:`Dataset <mlflow.data.dataset.Dataset>` to a
    JSON string.

    Returns:
        The JSON encoding of :py:meth:`to_dict`.
    """
    config = self.to_dict()
    return json.dumps(config)
def _get_source_type(self) -> str:
"""Returns the type of the dataset's underlying source."""
return self.source._get_source_type()
@property
def name(self) -> str:
    """
    The name of the dataset, e.g. ``"iris_data"``, ``"myschema.mycatalog.mytable@v1"``, etc.

    Falls back to the literal ``"dataset"`` when no name was supplied
    (an explicit empty string is preserved).
    """
    return self._name if self._name is not None else "dataset"
@property
def digest(self) -> str:
    """
    A unique hash or fingerprint of the dataset, e.g. ``"498c7496"``.

    Either the value supplied at construction time or the result of
    ``_compute_digest()``.
    """
    return self._digest
@property
def source(self) -> DatasetSource:
    """
    Information about the dataset's source, represented as an instance of
    :py:class:`DatasetSource <mlflow.data.dataset_source.DatasetSource>`. For example, this
    may be the S3 location or the name of the managed Delta Table from which the dataset
    was derived.
    """
    # Set once in __init__ from the constructor's `source` argument.
    return self._source
# Abstract: concrete datasets return implementation-specific statistics or None.
@property
@abstractmethod
def profile(self) -> Any | None:
    """
    Optional summary statistics for the dataset, such as the number of rows in a table, the
    mean / median / std of each table column, etc.
    """
# Abstract: concrete datasets return an implementation-specific schema or None.
@property
@abstractmethod
def schema(self) -> Any | None:
    """
    Optional dataset schema, such as an instance of :py:class:`mlflow.types.Schema` representing
    the features and targets of the dataset.
    """
def _to_mlflow_entity(self) -> DatasetEntity:
    """Convert this dataset into its tracking-entity form.

    Returns:
        A `mlflow.entities.Dataset` instance built from :py:meth:`to_dict`;
        ``schema`` and ``profile`` are passed through only when the subclass
        includes them in its config dict.
    """
    config = self.to_dict()
    return DatasetEntity(
        name=config["name"],
        digest=config["digest"],
        source_type=config["source_type"],
        source=config["source"],
        schema=config.get("schema"),
        profile=config.get("profile"),
    )
| Dataset |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass7.py | {
"start": 307,
"end": 519
} | class ____(RGB):
def __init__(self, red: int, green: int, blue: int) -> None:
self.rgb = red, green, blue
# This should generate an error because "intensity" is not implemented.
p = Point(1, 2, 3)
| Point |
python | getsentry__sentry | tests/sentry/dashboards/endpoints/test_organization_dashboards.py | {
"start": 737,
"end": 81341
} | class ____(OrganizationDashboardWidgetTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.url = reverse(
"sentry-api-0-organization-dashboards",
kwargs={"organization_id_or_slug": self.organization.slug},
)
self.dashboard_2 = Dashboard.objects.create(
title="Dashboard 2", created_by_id=self.user.id, organization=self.organization
)
DashboardWidget.objects.create(
dashboard=self.dashboard_2,
title="Widget 1",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.DISCOVER,
interval="1d",
)
def assert_equal_dashboards(self, dashboard, data):
assert data["id"] == str(dashboard.id)
assert data["title"] == dashboard.title
assert data["createdBy"]["id"] == str(dashboard.created_by_id)
widgets = self.get_widgets(dashboard.id)
widget_displays = []
for widget in widgets:
widget_displays.append(DashboardWidgetDisplayTypes.get_type_name(widget.display_type))
assert data["widgetDisplay"] == widget_displays
filters = dashboard.get_filters()
if filters and filters.get("projects"):
assert data.get("projects") == filters["projects"]
assert "widgets" not in data
def test_get(self) -> None:
response = self.do_request("get", self.url)
assert response.status_code == 200, response.content
assert len(response.data) == 3
assert "default-overview" == response.data[0]["id"]
self.assert_equal_dashboards(self.dashboard, response.data[1])
self.assert_equal_dashboards(self.dashboard_2, response.data[2])
def test_get_default_overview_has_widget_preview_field(self) -> None:
response = self.do_request("get", self.url)
assert response.status_code == 200, response.content
assert "default-overview" == response.data[0]["id"]
default_overview_data = Dashboard.get_prebuilt(
self.organization, self.user, "default-overview"
)
default_overview = response.data[0]
assert default_overview["widgetPreview"] == [
{"displayType": w["displayType"], "layout": None}
for w in default_overview_data["widgets"]
]
def test_get_with_tombstone(self) -> None:
DashboardTombstone.objects.create(organization=self.organization, slug="default-overview")
response = self.do_request("get", self.url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert "default-overview" not in [r["id"] for r in response.data]
def test_get_query(self) -> None:
dashboard = Dashboard.objects.create(
title="Dashboard 11", created_by_id=self.user.id, organization=self.organization
)
response = self.do_request("get", self.url, data={"query": "1"})
assert response.status_code == 200, response.content
assert len(response.data) == 2
self.assert_equal_dashboards(self.dashboard, response.data[0])
self.assert_equal_dashboards(dashboard, response.data[1])
def test_get_query_no_results(self) -> None:
response = self.do_request("get", self.url, data={"query": "not-in-there"})
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_get_sortby(self) -> None:
Dashboard.objects.create(
title="A", created_by_id=self.user.id, organization=self.organization
)
sort_options = {
"dateCreated": True,
"-dateCreated": False,
"title": True,
"-title": False,
}
for sorting, forward_sort in sort_options.items():
response = self.client.get(self.url, data={"sort": sorting})
assert response.status_code == 200
# Ignoring the prebuilt query (date created is empty)
values = [row[sorting.strip("-")] for row in response.data if row["dateCreated"]]
if not forward_sort:
values = list(reversed(values))
assert list(sorted(values)) == values
def test_get_sortby_most_popular(self) -> None:
Dashboard.objects.create(
title="A",
created_by_id=self.user.id,
organization=self.organization,
visits=3,
last_visited=before_now(minutes=5),
)
for forward_sort in [True, False]:
sorting = "mostPopular" if forward_sort else "-mostPopular"
response = self.client.get(self.url, data={"sort": sorting})
assert response.status_code == 200
values = [row["title"] for row in response.data]
expected = ["A", "Dashboard 2", "Dashboard 1"]
if not forward_sort:
expected = ["Dashboard 2", "Dashboard 1", "A"]
assert values == ["General"] + expected
def test_get_sortby_recently_viewed(self) -> None:
Dashboard.objects.create(
title="A",
created_by_id=self.user.id,
organization=self.organization,
visits=3,
last_visited=before_now(minutes=5),
)
for forward_sort in [True, False]:
sorting = "recentlyViewed" if forward_sort else "-recentlyViewed"
response = self.client.get(self.url, data={"sort": sorting})
assert response.status_code == 200
values = [row["title"] for row in response.data]
expected = ["Dashboard 2", "Dashboard 1", "A"]
if not forward_sort:
expected = list(reversed(expected))
assert values == ["General"] + expected
def test_get_sortby_recently_viewed_user_last_visited(self) -> None:
dashboard_a = Dashboard.objects.create(
title="A",
created_by_id=self.user.id,
organization=self.organization,
)
dashboard_b = Dashboard.objects.create(
title="B",
created_by_id=self.user.id,
organization=self.organization,
)
DashboardLastVisited.objects.create(
dashboard=dashboard_a,
member=OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
),
last_visited=before_now(minutes=5),
)
DashboardLastVisited.objects.create(
dashboard=dashboard_b,
member=OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
),
last_visited=before_now(minutes=0),
)
for forward_sort in [True, False]:
sorting = "recentlyViewed" if forward_sort else "-recentlyViewed"
with self.feature("organizations:dashboards-starred-reordering"):
response = self.client.get(self.url, data={"sort": sorting})
assert response.status_code == 200
values = [row["title"] for row in response.data]
expected = ["B", "A"]
if not forward_sort:
expected = list(reversed(expected))
# Only A, B are sorted by their last visited entry, Dashboard 1
# and Dashboard 2 are by default sorted by their date created
assert values == ["General"] + expected + ["Dashboard 2", "Dashboard 1"]
def test_get_sortby_mydashboards(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
Dashboard.objects.create(title="A", created_by_id=user_1.id, organization=self.organization)
Dashboard.objects.create(title="B", created_by_id=user_2.id, organization=self.organization)
response = self.client.get(self.url, data={"sort": "mydashboards"})
assert response.status_code == 200, response.content
values = [int(row["createdBy"]["id"]) for row in response.data if row["dateCreated"]]
assert values == [self.user.id, self.user.id, user_2.id, user_1.id]
def test_get_sortby_mydashboards_and_recently_viewed(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
Dashboard.objects.create(
title="Dashboard 3",
created_by_id=user_1.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
Dashboard.objects.create(
title="Dashboard 4",
created_by_id=user_2.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
Dashboard.objects.create(
title="Dashboard 5",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
Dashboard.objects.create(
title="Dashboard 6",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
response = self.client.get(self.url, data={"sort": "myDashboardsAndRecentlyViewed"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data if row["dateCreated"]]
assert values == [
"Dashboard 6",
"Dashboard 2",
"Dashboard 1",
"Dashboard 5",
"Dashboard 4",
"Dashboard 3",
]
def test_get_sortby_mydashboards_with_owner_name(self) -> None:
user_1 = self.create_user(username="user_1", name="Cat")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2", name="Pineapple")
self.create_member(organization=self.organization, user=user_2)
user_3 = self.create_user(username="user_3", name="Banana")
self.create_member(organization=self.organization, user=user_3)
user_4 = self.create_user(username="user_4", name="Aapple")
self.create_member(organization=self.organization, user=user_4)
Dashboard.objects.create(title="A", created_by_id=user_1.id, organization=self.organization)
Dashboard.objects.create(title="B", created_by_id=user_2.id, organization=self.organization)
Dashboard.objects.create(title="C", created_by_id=user_3.id, organization=self.organization)
Dashboard.objects.create(title="D", created_by_id=user_4.id, organization=self.organization)
Dashboard.objects.create(title="E", created_by_id=user_2.id, organization=self.organization)
Dashboard.objects.create(title="F", created_by_id=user_1.id, organization=self.organization)
self.login_as(user_1)
response = self.client.get(self.url, data={"sort": "mydashboards"})
assert response.status_code == 200, response.content
values = [row["createdBy"]["name"] for row in response.data if row["dateCreated"]]
assert values == [
"Cat",
"Cat",
"admin@localhost", # name is empty
"admin@localhost",
"Aapple",
"Banana",
"Pineapple",
"Pineapple",
]
# descending
response = self.client.get(self.url, data={"sort": "-mydashboards"})
assert response.status_code == 200, response.content
values = [row["createdBy"]["name"] for row in response.data if row["dateCreated"]]
assert values == [
"Cat",
"Cat",
"Pineapple",
"Pineapple",
"Banana",
"Aapple",
"admin@localhost", # name is empty
"admin@localhost",
]
def test_get_only_favorites_no_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
dashboard_4 = Dashboard.objects.create(
title="Dashboard 4",
created_by_id=user_2.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_4.favorited_by = [user_1.id, user_2.id]
dashboard_3 = Dashboard.objects.create(
title="Dashboard 3",
created_by_id=user_1.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_3.favorited_by = [user_1.id]
dashboard_5 = Dashboard.objects.create(
title="Dashboard 5",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_5.favorited_by = [user_1.id]
dashboard_6 = Dashboard.objects.create(
title="Dashboard 6",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_6.favorited_by = [user_2.id]
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "onlyFavorites"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
# sorted by title by default
assert values == ["Dashboard 3", "Dashboard 4", "Dashboard 5"]
def test_get_only_favorites_with_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
dashboard_4 = Dashboard.objects.create(
title="Dashboard 4",
created_by_id=user_2.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_4.favorited_by = [user_1.id, user_2.id]
dashboard_3 = Dashboard.objects.create(
title="Dashboard 3",
created_by_id=user_1.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_3.favorited_by = [user_1.id]
dashboard_5 = Dashboard.objects.create(
title="Dashboard 5",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_5.favorited_by = [user_1.id]
dashboard_6 = Dashboard.objects.create(
title="Dashboard 7",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_6.favorited_by = [user_2.id]
dashboard_7 = Dashboard.objects.create(
title="Dashboard 6",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_7.favorited_by = [user_2.id]
self.login_as(user_1)
response = self.client.get(
self.url, data={"filter": "onlyFavorites", "sort": "dateCreated"}
)
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["Dashboard 4", "Dashboard 3", "Dashboard 5"]
def test_get_exclude_favorites_with_no_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
dashboard_4 = Dashboard.objects.create(
title="Dashboard 4",
created_by_id=user_2.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_4.favorited_by = [user_1.id, user_2.id]
dashboard_3 = Dashboard.objects.create(
title="Dashboard 3",
created_by_id=user_1.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_3.favorited_by = [user_1.id]
dashboard_7 = Dashboard.objects.create(
title="Dashboard 7",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_7.favorited_by = [user_2.id]
dashboard_5 = Dashboard.objects.create(
title="Dashboard 5",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_5.favorited_by = [user_1.id]
dashboard_6 = Dashboard.objects.create(
title="Dashboard 6",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_6.favorited_by = [user_2.id]
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "excludeFavorites"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
# sorted by title by default
assert values == ["General", "Dashboard 1", "Dashboard 2", "Dashboard 6", "Dashboard 7"]
def test_get_exclude_favorites_with_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
dashboard_4 = Dashboard.objects.create(
title="Dashboard 4",
created_by_id=user_2.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_4.favorited_by = [user_1.id, user_2.id]
dashboard_3 = Dashboard.objects.create(
title="Dashboard 3",
created_by_id=user_1.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_3.favorited_by = [user_1.id]
dashboard_7 = Dashboard.objects.create(
title="Dashboard 7",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_7.favorited_by = [user_2.id]
dashboard_5 = Dashboard.objects.create(
title="Dashboard 5",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=5),
)
dashboard_5.favorited_by = [user_1.id]
dashboard_6 = Dashboard.objects.create(
title="Dashboard 6",
created_by_id=self.user.id,
organization=self.organization,
last_visited=before_now(minutes=0),
)
dashboard_6.favorited_by = [user_2.id]
self.login_as(user_1)
response = self.client.get(
self.url, data={"filter": "excludeFavorites", "sort": "dateCreated"}
)
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["General", "Dashboard 1", "Dashboard 2", "Dashboard 7", "Dashboard 6"]
def test_pin_favorites_with_my_dashboards_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
Dashboard.objects.create(
title="Dashboard A",
created_by_id=self.user.id,
organization=self.organization,
)
dashboard_B = Dashboard.objects.create(
title="Dashboard B",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_C = Dashboard.objects.create(
title="Dashboard C",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_D = Dashboard.objects.create(
title="Dashboard D",
created_by_id=self.user.id,
organization=self.organization,
)
dashboard_E = Dashboard.objects.create(
title="Dashboard E",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_B.favorited_by = [self.user.id]
dashboard_D.favorited_by = [self.user.id, user_1.id]
dashboard_E.favorited_by = [self.user.id]
dashboard_C.favorited_by = [user_1.id]
response = self.client.get(self.url, data={"sort": "mydashboards", "pin": "favorites"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == [
# favorites
"Dashboard D", # self.user's favorite
"Dashboard E", # user_1's dashboard
"Dashboard B", # user_1's dashboard
# other dashboards
"Dashboard A", # self.user's dashboard
"Dashboard 2", # self.user's dashboard
"Dashboard 1", # self.user's dashboard
"Dashboard C", # user_1's dashbaord
]
def test_pin_favorites_with_my_date_created_sort(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
Dashboard.objects.create(
title="Dashboard A",
created_by_id=self.user.id,
organization=self.organization,
)
dashboard_B = Dashboard.objects.create(
title="Dashboard B",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_C = Dashboard.objects.create(
title="Dashboard C",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_D = Dashboard.objects.create(
title="Dashboard D",
created_by_id=self.user.id,
organization=self.organization,
)
dashboard_E = Dashboard.objects.create(
title="Dashboard E",
created_by_id=user_1.id,
organization=self.organization,
)
dashboard_B.favorited_by = [self.user.id, user_1.id]
dashboard_D.favorited_by = [self.user.id]
dashboard_E.favorited_by = [self.user.id]
dashboard_C.favorited_by = [user_1.id]
response = self.client.get(self.url, data={"sort": "dateCreated", "pin": "favorites"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == [
# favorites
"Dashboard B",
"Dashboard D",
"Dashboard E",
# other dashboards
"Dashboard 1",
"Dashboard 2",
"Dashboard A",
"Dashboard C",
]
def test_get_owned_dashboards(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
Dashboard.objects.create(
title="Dashboard User 1",
created_by_id=user_1.id,
organization=self.organization,
)
Dashboard.objects.create(
title="Dashboard User 2",
created_by_id=user_2.id,
organization=self.organization,
)
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "owned"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["Dashboard User 1"]
self.login_as(user_2)
response = self.client.get(self.url, data={"filter": "owned"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["Dashboard User 2"]
def test_get_owned_dashboards_across_organizations(self) -> None:
user_1 = self.create_user(username="user_1")
# The test user is a member of both orgs.
other_org = self.create_organization(name="Other Org")
self.create_member(organization=other_org, user=user_1)
self.create_member(organization=self.organization, user=user_1)
Dashboard.objects.create(
title="Initial dashboard",
created_by_id=user_1.id,
organization=self.organization,
)
Dashboard.objects.create(
title="Other org dashboard",
created_by_id=user_1.id,
organization=other_org,
)
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "owned"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["Initial dashboard"]
def test_get_owned_dashboards_can_pin_starred_at_top(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
Dashboard.objects.create(
title="Dashboard User 1",
created_by_id=user_1.id,
organization=self.organization,
)
starred_dashboard = Dashboard.objects.create(
title="Starred dashboard",
created_by_id=user_1.id,
organization=self.organization,
)
Dashboard.objects.create(
title="Dashboard User 2",
created_by_id=user_2.id,
organization=self.organization,
)
# Add the starred dashboard to the user's favorites.
DashboardFavoriteUser.objects.insert_favorite_dashboard(
organization=self.organization,
user_id=user_1.id,
dashboard=starred_dashboard,
)
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "owned", "pin": "favorites"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["Starred dashboard", "Dashboard User 1"]
def test_get_shared_dashboards(self) -> None:
user_1 = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=user_1)
user_2 = self.create_user(username="user_2")
self.create_member(organization=self.organization, user=user_2)
# Clean up existing dashboards setup.
Dashboard.objects.all().delete()
Dashboard.objects.create(
title="Dashboard User 1",
created_by_id=user_1.id,
organization=self.organization,
)
Dashboard.objects.create(
title="Dashboard User 2",
created_by_id=user_2.id,
organization=self.organization,
)
self.login_as(user_1)
response = self.client.get(self.url, data={"filter": "shared"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["General", "Dashboard User 2"]
self.login_as(user_2)
response = self.client.get(self.url, data={"filter": "shared"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["General", "Dashboard User 1"]
def test_get_shared_dashboards_across_organizations(self) -> None:
# The test user is a member of just the single org.
test_user = self.create_user(username="user_1")
self.create_member(organization=self.organization, user=test_user)
# The other test user is a member of both orgs.
other_user = self.create_user(username="other_user")
other_org = self.create_organization(name="Other Org")
self.create_member(organization=other_org, user=other_user)
self.create_member(organization=self.organization, user=other_user)
# Clean up existing dashboards setup.
Dashboard.objects.all().delete()
Dashboard.objects.create(
title="Initial dashboard",
created_by_id=other_user.id,
organization=self.organization,
)
Dashboard.objects.create(
title="Other org dashboard",
created_by_id=other_user.id,
organization=other_org,
)
self.login_as(test_user)
response = self.client.get(self.url, data={"filter": "shared"})
assert response.status_code == 200, response.content
values = [row["title"] for row in response.data]
assert values == ["General", "Initial dashboard"]
def test_get_with_filters(self) -> None:
Dashboard.objects.create(
title="Dashboard with all projects filter",
organization=self.organization,
created_by_id=self.user.id,
filters={"all_projects": True, "environment": ["alpha"], "release": ["v1"]},
)
response = self.client.get(self.url, data={"query": "Dashboard with all projects filter"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["title"] == "Dashboard with all projects filter"
assert response.data[0].get("projects") == [-1]
assert response.data[0].get("environment") == ["alpha"]
assert response.data[0].get("filters") == {"release": ["v1"]}
def test_get_with_last_visited(self) -> None:
# Clean up existing dashboards setup for this test.
Dashboard.objects.all().delete()
Dashboard.objects.create(
title="Dashboard without last visited",
organization=self.organization,
created_by_id=self.user.id,
)
dashboard_2 = Dashboard.objects.create(
title="Dashboard with last visited",
organization=self.organization,
created_by_id=self.user.id,
)
now = before_now(minutes=0)
DashboardLastVisited.objects.create(
dashboard=dashboard_2,
member=OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
),
last_visited=now,
)
with self.feature("organizations:dashboards-starred-reordering"):
response = self.client.get(self.url, data={"sort": "recentlyViewed"})
assert response.status_code == 200, response.content
assert len(response.data) == 3
titles = [row["title"] for row in response.data]
assert titles == [
"General",
"Dashboard with last visited",
"Dashboard without last visited",
]
# Only "Dashboard with last visited" has a last visited timestamp.
visited_at = [row.get("lastVisited") for row in response.data]
assert visited_at == [None, now, None]
def test_get_recently_viewed_sort_with_favorites_from_other_user(self) -> None:
other_user = self.create_user(username="other_user")
self.create_member(organization=self.organization, user=other_user)
Dashboard.objects.all().delete()
dashboard_1 = Dashboard.objects.create(
title="Dashboard 1",
created_by_id=other_user.id,
organization=self.organization,
)
# Both users have the same dashboard in their favorites
DashboardFavoriteUser.objects.insert_favorite_dashboard(
organization=self.organization,
user_id=self.user.id,
dashboard=dashboard_1,
)
DashboardFavoriteUser.objects.insert_favorite_dashboard(
organization=self.organization,
user_id=other_user.id,
dashboard=dashboard_1,
)
# Both users have recently visited the dashboard
DashboardLastVisited.objects.create(
dashboard=dashboard_1,
member=OrganizationMember.objects.get(
organization=self.organization, user_id=self.user.id
),
last_visited=before_now(minutes=0),
)
DashboardLastVisited.objects.create(
dashboard=dashboard_1,
member=OrganizationMember.objects.get(
organization=self.organization, user_id=other_user.id
),
last_visited=before_now(minutes=2),
)
with self.feature("organizations:dashboards-starred-reordering"):
response = self.client.get(
self.url, data={"sort": "recentlyViewed", "pin": "favorites"}
)
assert response.status_code == 200, response.content
# Assert that the dashboard did not receive a duplicate entry due to being
# favorited by another user
assert len(response.data) == 1
self.assert_equal_dashboards(dashboard_1, response.data[0])
def test_post(self) -> None:
response = self.do_request("post", self.url, data={"title": "Dashboard from Post"})
assert response.status_code == 201
dashboard = Dashboard.objects.get(
organization=self.organization, title="Dashboard from Post"
)
assert dashboard.created_by_id == self.user.id
    def test_post_member_can_create(self) -> None:
        """Users with only the member role are still allowed to create dashboards."""
        self.create_user_member_role()
        response = self.do_request("post", self.url, data={"title": "Dashboard from Post"})
        assert response.status_code == 201
    def test_post_features_required(self) -> None:
        """POST is hidden (404) when both dashboards feature flags are disabled."""
        with self.feature(
            {"organizations:dashboards-basic": False, "organizations:dashboards-edit": False}
        ):
            response = self.do_request(
                "post",
                self.url,
                data={"title": "Dashboard from Post"},
            )
            assert response.status_code == 404
    def test_post_with_widgets(self) -> None:
        """Widgets (including layouts) submitted with the dashboard are persisted in order."""
        data: dict[str, Any] = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
                },
                {
                    "displayType": "bar",
                    "interval": "5m",
                    "title": "Error count()",
                    "queries": [
                        {
                            "name": "Errors",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:error",
                        }
                    ],
                    "layout": {"x": 1, "y": 0, "w": 1, "h": 1, "minH": 2},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        assert dashboard.created_by_id == self.user.id
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 2
        # Guard: both input widgets carried a layout, so the serializer comparison
        # below also covers layout round-tripping.
        assert "layout" in data["widgets"][0]
        assert "layout" in data["widgets"][1]
        for expected_widget, actual_widget in zip(data["widgets"], widgets):
            self.assert_serialized_widget(expected_widget, actual_widget)
            queries = actual_widget.dashboardwidgetquery_set.all()
            for expected_query, actual_query in zip(expected_widget["queries"], queries):
                self.assert_serialized_widget_query(expected_query, actual_query)
    def test_post_widget_with_camel_case_layout_keys_returns_camel_case(self) -> None:
        """camelCase layout keys (e.g. minH) round-trip unchanged through create + read."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 2, "h": 2, "minH": 2},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        assert dashboard.created_by_id == self.user.id
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        assert "layout" in data["widgets"][0]
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
    def test_post_widgets_with_null_layout_succeeds(self) -> None:
        """An explicit null layout is accepted and the widget is still created."""
        data: dict[str, Any] = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": None,
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        assert dashboard.created_by_id == self.user.id
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        assert "layout" in data["widgets"][0]
        for expected_widget, actual_widget in zip(data["widgets"], widgets):
            self.assert_serialized_widget(expected_widget, actual_widget)
            queries = actual_widget.dashboardwidgetquery_set.all()
            for expected_query, actual_query in zip(expected_widget["queries"], queries):
                self.assert_serialized_widget_query(expected_query, actual_query)
    def test_post_widgets_with_invalid_layout(self) -> None:
        """Layout values of the wrong type (bool / string coordinates) are rejected with 400."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": False, "y": "this is incorrect", "w": 1, "h": 1, "minH": 2},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400, response.data
    def test_extra_keys_in_widget_layout_are_ignored(self) -> None:
        """Unknown layout keys are silently dropped; the stored widget matches the
        expected layout without the extra keys."""
        expected_widget: dict[str, Any] = {
            "displayType": "line",
            "interval": "5m",
            "title": "Transaction count()",
            "queries": [
                {
                    "name": "Transactions",
                    "fields": ["count()"],
                    "columns": [],
                    "aggregates": ["count()"],
                    "conditions": "event.type:transaction",
                }
            ],
            "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
        }
        data: dict[str, Any] = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    **expected_widget,
                    # Submit a layout that is a superset of the expected one.
                    "layout": {
                        **expected_widget["layout"],
                        "totally unexpected": "but ignored",
                        "no matter the type": True,
                    },
                }
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        assert "layout" in data["widgets"][0]
        self.assert_serialized_widget(expected_widget, widgets[0])
    def test_post_widgets_with_valid_layout_keys_but_non_int_values(self) -> None:
        """Correct layout keys with non-integer values are rejected with 400."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": "this", "y": "should", "w": "fail", "h": 1, "minH": 2},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400, response.data
    def test_post_errors_if_layout_submitted_without_required_keys(self) -> None:
        """An empty layout object (missing required keys) is rejected with 400."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400, response.data
def test_post_dashboard_with_filters(self) -> None:
project1 = self.create_project(name="foo", organization=self.organization)
project2 = self.create_project(name="bar", organization=self.organization)
response = self.do_request(
"post",
self.url,
data={
"title": "Dashboard from Post",
"projects": [project1.id, project2.id],
"environment": ["alpha"],
"period": "7d",
"filters": {"release": ["v1"], "releaseId": ["1"]},
},
)
assert response.status_code == 201
assert response.data["projects"].sort() == [project1.id, project2.id].sort()
assert response.data["environment"] == ["alpha"]
assert response.data["period"] == "7d"
assert response.data["filters"]["release"] == ["v1"]
assert response.data["filters"]["releaseId"] == ["1"]
    def test_post_with_start_and_end_filter(self) -> None:
        """Absolute start/end datetimes with utc=True are stored and returned intact."""
        start = (datetime.now() - timedelta(seconds=10)).isoformat()
        end = datetime.now().isoformat()
        response = self.do_request(
            "post",
            self.url,
            data={"title": "Dashboard from Post", "start": start, "end": end, "utc": True},
        )
        assert response.status_code == 201
        # Strip tzinfo before comparing since the submitted strings are naive.
        assert response.data["start"].replace(tzinfo=None).isoformat() == start
        assert response.data["end"].replace(tzinfo=None).isoformat() == end
        assert response.data["utc"]
    def test_post_with_start_and_end_filter_and_utc_false(self) -> None:
        """Absolute start/end datetimes with utc=False are stored and returned intact."""
        start = (datetime.now() - timedelta(seconds=10)).isoformat()
        end = datetime.now().isoformat()
        response = self.do_request(
            "post",
            self.url,
            data={"title": "Dashboard from Post", "start": start, "end": end, "utc": False},
        )
        assert response.status_code == 201
        # Strip tzinfo before comparing since the submitted strings are naive.
        assert response.data["start"].replace(tzinfo=None).isoformat() == start
        assert response.data["end"].replace(tzinfo=None).isoformat() == end
        assert not response.data["utc"]
    def test_post_dashboard_with_invalid_project_filter(self) -> None:
        """Filtering on a project from another organization is forbidden (403)."""
        other_org = self.create_organization()
        other_project = self.create_project(name="other", organization=other_org)
        response = self.do_request(
            "post",
            self.url,
            data={
                "title": "Dashboard from Post",
                "projects": [other_project.id],
            },
        )
        assert response.status_code == 403
    def test_post_dashboard_with_invalid_start_end_filter(self) -> None:
        """A start datetime later than the end datetime is rejected with 400."""
        start = datetime.now()
        end = datetime.now() - timedelta(seconds=10)
        response = self.do_request(
            "post",
            self.url,
            data={"title": "Dashboard from Post", "start": start, "end": end},
        )
        assert response.status_code == 400
    def test_add_widget_with_limit(self) -> None:
        """Per-widget result limits within the allowed range are persisted."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "limit": 6,
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
                {
                    "displayType": "bar",
                    "interval": "5m",
                    "limit": 5,
                    "title": "Error count()",
                    "queries": [
                        {
                            "name": "Errors",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:error",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard from Post"
        )
        widgets = self.get_widgets(dashboard.id)
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        self.assert_serialized_widget(data["widgets"][1], widgets[1])
    def test_add_widget_with_invalid_limit_above_maximum(self) -> None:
        """A widget limit above the maximum (10) is rejected with a validation error."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "limit": 11,
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400
        assert b"Ensure this value is less than or equal to 10" in response.content
    def test_add_widget_with_invalid_limit_below_minimum(self) -> None:
        """A widget limit below the minimum (1) is rejected with a validation error."""
        data = {
            "title": "Dashboard from Post",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "limit": 0,
                    "title": "Transaction count()",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400
        assert b"Ensure this value is greater than or equal to 1" in response.content
    def test_add_widget_with_field_aliases_succeeds(self) -> None:
        """Widget queries carrying fieldAliases are accepted and persisted."""
        data: dict[str, Any] = {
            "title": "Dashboard with fieldAliases in the query",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "limit": 5,
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": ["transaction"],
                            "aggregates": ["count()"],
                            "fieldAliases": ["Count Alias"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard with fieldAliases in the query"
        )
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        for expected_widget, actual_widget in zip(data["widgets"], widgets):
            self.assert_serialized_widget(expected_widget, actual_widget)
            queries = actual_widget.dashboardwidgetquery_set.all()
            for expected_query, actual_query in zip(expected_widget["queries"], queries):
                self.assert_serialized_widget_query(expected_query, actual_query)
    def test_post_widgets_with_columns_and_aggregates_succeeds(self) -> None:
        """Queries specifying explicit columns and aggregates are accepted and persisted."""
        data: dict[str, Any] = {
            "title": "Dashboard with null agg and cols",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Transaction count()",
                    "limit": 5,
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": ["transaction"],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard with null agg and cols"
        )
        assert dashboard.created_by_id == self.user.id
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        for expected_widget, actual_widget in zip(data["widgets"], widgets):
            self.assert_serialized_widget(expected_widget, actual_widget)
            queries = actual_widget.dashboardwidgetquery_set.all()
            for expected_query, actual_query in zip(expected_widget["queries"], queries):
                self.assert_serialized_widget_query(expected_query, actual_query)
    def test_post_dashboard_with_greater_than_max_widgets_not_allowed(self) -> None:
        """Submitting more than Dashboard.MAX_WIDGETS widgets is rejected with 400."""
        data = {
            "title": "Dashboard with way too many widgets",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": f"Widget {i}",
                    "limit": 5,
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": ["transaction"],
                            "aggregates": ["count()"],
                            "conditions": "event.type:transaction",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
                }
                # One widget over the limit triggers the validation error.
                for i in range(Dashboard.MAX_WIDGETS + 1)
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 400, response.data
        assert (
            f"Number of widgets must be less than {Dashboard.MAX_WIDGETS}"
            in response.content.decode()
        )
    def test_invalid_data(self) -> None:
        """A payload without the required title field is rejected with 400."""
        response = self.do_request("post", self.url, data={"malformed-data": "Dashboard from Post"})
        assert response.status_code == 400
    def test_integrity_error(self) -> None:
        """Creating a dashboard whose title already exists yields a 409 conflict."""
        response = self.do_request("post", self.url, data={"title": self.dashboard.title})
        assert response.status_code == 409
        assert response.data == "Dashboard title already taken"
    def test_duplicate_dashboard(self) -> None:
        """duplicate=True appends "copy" (then "copy 1", ...) instead of conflicting."""
        response = self.do_request(
            "post",
            self.url,
            data={"title": self.dashboard.title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == f"{self.dashboard.title} copy"
        response = self.do_request(
            "post",
            self.url,
            data={"title": self.dashboard.title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == f"{self.dashboard.title} copy 1"
    def test_many_duplicate_dashboards(self) -> None:
        """Repeated duplicates increment the "copy N" suffix monotonically."""
        title = "My Awesome Dashboard"
        # First request with a fresh title creates it verbatim.
        response = self.do_request(
            "post",
            self.url,
            data={"title": title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == "My Awesome Dashboard"
        # Second request appends "copy" with no number.
        response = self.do_request(
            "post",
            self.url,
            data={"title": title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == "My Awesome Dashboard copy"
        # Subsequent requests append "copy 1", "copy 2", ...
        for i in range(1, 10):
            response = self.do_request(
                "post",
                self.url,
                data={"title": title, "duplicate": True},
            )
            assert response.status_code == 201, response.data
            assert response.data["title"] == f"My Awesome Dashboard copy {i}"
    def test_duplicate_a_duplicate(self) -> None:
        """Duplicating a title that already ends in "copy N" increments N rather
        than appending another "copy" suffix."""
        title = "An Amazing Dashboard copy 3"
        response = self.do_request(
            "post",
            self.url,
            data={"title": title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == "An Amazing Dashboard copy 3"
        response = self.do_request(
            "post",
            self.url,
            data={"title": title, "duplicate": True},
        )
        assert response.status_code == 201, response.data
        assert response.data["title"] == "An Amazing Dashboard copy 4"
    def test_widget_preview_field_returns_empty_list_if_no_widgets(self) -> None:
        """widgetPreview is present but empty for a dashboard with no widgets."""
        response = self.do_request("get", self.url, data={"query": "1"})
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        dashboard_data = response.data[0]
        assert "widgetPreview" in dashboard_data
        assert dashboard_data["widgetPreview"] == []
    def test_widget_preview_field_contains_display_type_and_layout(self) -> None:
        """widgetPreview entries expose the widget's display type and its stored layout."""
        expected_layout = {"x": 1, "y": 0, "w": 1, "h": 1, "minH": 2}
        DashboardWidget.objects.create(
            dashboard=self.dashboard,
            title="Widget 1",
            display_type=DashboardWidgetDisplayTypes.LINE_CHART,
            widget_type=DashboardWidgetTypes.DISCOVER,
            interval="1d",
            detail={"layout": expected_layout},
        )
        response = self.do_request("get", self.url, data={"query": "1"})
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        dashboard_data = response.data[0]
        assert "widgetPreview" in dashboard_data
        assert len(dashboard_data["widgetPreview"]) == 1
        widget_data = dashboard_data["widgetPreview"][0]
        assert widget_data["displayType"] == DashboardWidgetDisplayTypes.get_type_name(
            DashboardWidgetDisplayTypes.LINE_CHART
        )
        assert widget_data["layout"] == expected_layout
    def test_widget_preview_still_provides_display_type_if_no_layout(self) -> None:
        """widgetPreview reports displayType with a None layout when none was stored."""
        DashboardWidget.objects.create(
            dashboard=self.dashboard,
            title="Widget 1",
            display_type=DashboardWidgetDisplayTypes.LINE_CHART,
            widget_type=DashboardWidgetTypes.DISCOVER,
            interval="1d",
        )
        response = self.do_request("get", self.url, data={"query": "1"})
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        dashboard_data = response.data[0]
        assert "widgetPreview" in dashboard_data
        assert len(dashboard_data["widgetPreview"]) == 1
        widget_data = dashboard_data["widgetPreview"][0]
        assert widget_data["displayType"] == DashboardWidgetDisplayTypes.get_type_name(
            DashboardWidgetDisplayTypes.LINE_CHART
        )
        assert widget_data["layout"] is None
    def test_post_dashboard_with_widget_filter_requiring_environment(self) -> None:
        """A widget condition that needs an environment (release.stage) validates
        when the environment is passed as a query parameter."""
        mock_project = self.create_project()
        self.create_environment(project=mock_project, name="mock_env")
        data = {
            "title": "Dashboard",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Widget",
                    "queries": [
                        {
                            "name": "Transactions",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "release.stage:adopted",
                        }
                    ],
                }
            ],
        }
        response = self.do_request("post", f"{self.url}?environment=mock_env", data=data)
        assert response.status_code == 201, response.data
    def test_post_dashboard_with_widget_split_datasets(self) -> None:
        """discover_widget_split mirrors widget_type for error/transaction widgets but
        stays None for widget types (e.g. issue) that don't participate in the split."""
        mock_project = self.create_project()
        self.create_environment(project=mock_project, name="mock_env")
        data = {
            "title": "Dashboard",
            "widgets": [
                {
                    "title": "Errors per project",
                    "displayType": "table",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "Errors",
                            "fields": ["count()", "project"],
                            "columns": ["project"],
                            "aggregates": ["count()"],
                            "conditions": "event.type:error",
                        }
                    ],
                    "widgetType": "error-events",
                },
                {
                    "title": "Transaction Op Count",
                    "displayType": "table",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "Transaction Op Count",
                            "fields": ["count()", "transaction.op"],
                            "columns": ["transaction.op"],
                            "aggregates": ["count()"],
                            "conditions": "",
                        }
                    ],
                    "widgetType": "transaction-like",
                },
                {
                    "title": "Irrelevant widget type",
                    "displayType": "table",
                    "interval": "5m",
                    "queries": [
                        {
                            "name": "Issues",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "",
                        }
                    ],
                    "widgetType": "issue",
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(id=response.data["id"])
        widgets = dashboard.dashboardwidget_set.all()
        assert widgets[0].widget_type == DashboardWidgetTypes.get_id_for_type_name("error-events")
        assert widgets[0].discover_widget_split == DashboardWidgetTypes.get_id_for_type_name(
            "error-events"
        )
        assert widgets[1].widget_type == DashboardWidgetTypes.get_id_for_type_name(
            "transaction-like"
        )
        assert widgets[1].discover_widget_split == DashboardWidgetTypes.get_id_for_type_name(
            "transaction-like"
        )
        assert widgets[2].widget_type == DashboardWidgetTypes.get_id_for_type_name("issue")
        assert widgets[2].discover_widget_split is None
    def test_add_widget_with_selected_aggregate(self) -> None:
        """A big_number widget's selectedAggregate index is persisted on its query."""
        data: dict[str, Any] = {
            "title": "First dashboard",
            "widgets": [
                {
                    "title": "EPM Big Number",
                    "displayType": "big_number",
                    "queries": [
                        {
                            "name": "",
                            "fields": ["epm()"],
                            "columns": [],
                            "aggregates": ["epm()", "count()"],
                            "conditions": "",
                            "orderby": "",
                            "selectedAggregate": 1,
                        }
                    ],
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(organization=self.organization, title="First dashboard")
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        queries = widgets[0].dashboardwidgetquery_set.all()
        assert len(queries) == 1
        self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
    def test_create_new_edit_perms_with_teams(self) -> None:
        """Edit permissions restricted to specific teams are stored and echoed back
        with string flags/ids normalized to bool/int."""
        team1 = self.create_team(organization=self.organization)
        team2 = self.create_team(organization=self.organization)
        data = {
            "title": "New Dashboard 7",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": [str(team1.id), str(team2.id)],
            },
            # Client-supplied createdBy/id should be ignored by the server.
            "createdBy": {"id": "23516"},
            "id": "7136",
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.content
        assert response.data["permissions"]["isEditableByEveryone"] is False
        assert response.data["permissions"]["teamsWithEditAccess"] == [team1.id, team2.id]
    def test_gets_dashboard_permissions_with_dashboard_list(self) -> None:
        """Every dashboard in the list response includes a permissions field,
        which is None for dashboards without explicit permissions."""
        response = self.do_request("get", self.url)
        assert response.status_code == 200, response.content
        assert len(response.data) > 1
        # Ensure the "permissions" field exists in each dashboard
        for dashboard in response.data:
            assert (
                "permissions" in dashboard
            ), f"Permissions field not found in dashboard: {dashboard}"
        self.assert_equal_dashboards(self.dashboard, response.data[1])
        assert response.data[1]["permissions"] is None
    # NOTE(review): method name has a typo ("dasboard"); left as-is to avoid
    # changing the test's public identifier.
    def test_dasboard_list_permissions_is_valid(self) -> None:
        """Permissions saved at creation are returned correctly in the list response."""
        team1 = self.create_team(organization=self.organization)
        team2 = self.create_team(organization=self.organization)
        data = {
            "title": "New Dashboard 7",
            "permissions": {
                "isEditableByEveryone": "false",
                "teamsWithEditAccess": [str(team1.id), str(team2.id)],
            },
            "createdBy": {"id": "23516"},
            "id": "7136",
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201
        response = self.do_request("get", self.url)
        assert response.status_code == 200, response.content
        assert len(response.data) == 4
        # The newly created dashboard sorts last in the default listing.
        assert response.data[3]["permissions"]["isEditableByEveryone"] is False
        assert response.data[3]["permissions"]["teamsWithEditAccess"] == [team1.id, team2.id]
    def test_gets_dashboard_favorited_with_dashboard_list(self) -> None:
        """isFavorited is reported per-dashboard and reflects the requesting user."""
        self.dashboard.favorited_by = [self.user.id]
        response = self.do_request("get", self.url)
        assert response.status_code == 200, response.content
        for dashboard in response.data:
            assert "isFavorited" in dashboard
        self.assert_equal_dashboards(self.dashboard, response.data[1])
        assert response.data[1]["isFavorited"] is True
        assert response.data[0]["isFavorited"] is False  # general template
        assert response.data[2]["isFavorited"] is False  # dashboard_2 w/ no favorites set
    def test_post_errors_widget_with_is_filter(self) -> None:
        """An error-events widget with an issue-style `is:` condition is accepted."""
        data: dict[str, Any] = {
            "title": "Dashboard with errors widget",
            "widgets": [
                {
                    "displayType": "line",
                    "interval": "5m",
                    "title": "Errors",
                    "limit": 5,
                    "queries": [
                        {
                            "name": "Errors",
                            "fields": ["count()"],
                            "columns": [],
                            "aggregates": ["count()"],
                            "conditions": "is:unresolved",
                        }
                    ],
                    "layout": {"x": 0, "y": 0, "w": 1, "h": 1, "minH": 2},
                    "widgetType": "error-events",
                },
            ],
        }
        response = self.do_request("post", self.url, data=data)
        assert response.status_code == 201, response.data
        dashboard = Dashboard.objects.get(
            organization=self.organization, title="Dashboard with errors widget"
        )
        assert dashboard.created_by_id == self.user.id
        widgets = self.get_widgets(dashboard.id)
        assert len(widgets) == 1
        self.assert_serialized_widget(data["widgets"][0], widgets[0])
        queries = widgets[0].dashboardwidgetquery_set.all()
        assert len(queries) == 1
        self.assert_serialized_widget_query(data["widgets"][0]["queries"][0], queries[0])
    def test_response_includes_project_ids(self) -> None:
        """Each listed dashboard includes its associated project ids (empty when none)."""
        project = self.create_project()
        self.dashboard.projects.add(project)
        self.dashboard.save()
        response = self.do_request("get", self.url)
        assert response.status_code == 200, response.content
        overview_dashboard = response.data[0]
        assert overview_dashboard["projects"] == []
        current_dashboard = response.data[1]
        assert current_dashboard["projects"] == [project.id]
        starred_dashboard = response.data[2]
        assert starred_dashboard["projects"] == []
    def test_automatically_favorites_dashboard_when_isFavorited_is_true(self) -> None:
        """Creating with isFavorited=True also creates a favorite record for the user."""
        data = {
            "title": "Dashboard with errors widget",
            "isFavorited": True,
        }
        with self.feature("organizations:dashboards-starred-reordering"):
            response = self.do_request("post", self.url, data=data)
            assert response.status_code == 201, response.data
            dashboard = Dashboard.objects.get(
                organization=self.organization, title="Dashboard with errors widget"
            )
            assert response.data["isFavorited"] is True
            assert (
                DashboardFavoriteUser.objects.get_favorite_dashboard(
                    organization=self.organization, user_id=self.user.id, dashboard=dashboard
                )
                is not None
            )
    def test_does_not_automatically_favorite_dashboard_when_isFavorited_is_false(self) -> None:
        """Creating with isFavorited=False leaves no favorite record for the user."""
        data = {
            "title": "Dashboard with errors widget",
            "isFavorited": False,
        }
        with self.feature("organizations:dashboards-starred-reordering"):
            response = self.do_request("post", self.url, data=data)
            assert response.status_code == 201, response.data
            dashboard = Dashboard.objects.get(
                organization=self.organization, title="Dashboard with errors widget"
            )
            assert response.data["isFavorited"] is False
            assert (
                DashboardFavoriteUser.objects.get_favorite_dashboard(
                    organization=self.organization, user_id=self.user.id, dashboard=dashboard
                )
                is None
            )
    def test_order_by_most_favorited(self) -> None:
        """sort=mostFavorited orders dashboards by descending favorite count."""
        Dashboard.objects.all().delete()
        # A mapping from dashboard title to the number of times it was favorited
        dashboards = {
            "Dashboard 1": 0,
            "Dashboard 2": 2,
            "Dashboard 3": 1,
        }
        # Set up a favorite entry for each dashboard by the number of times it was favorited
        for title, favorited in dashboards.items():
            dashboard = self.create_dashboard(title=title, organization=self.organization)
            if favorited:
                for _ in range(favorited):
                    # Each favorite comes from a distinct user.
                    user = self.create_user()
                    DashboardFavoriteUser.objects.create(
                        dashboard=dashboard,
                        user_id=user.id,
                        organization=self.organization,
                    )
        with self.feature("organizations:dashboards-starred-reordering"):
            response = self.do_request(
                "get", self.url, {"sort": "mostFavorited", "pin": "favorites"}
            )
            assert response.status_code == 200, response.content
            assert [dashboard["title"] for dashboard in response.data] == [
                "Dashboard 2",
                "Dashboard 3",
                "Dashboard 1",
            ]
    @patch("sentry.quotas.backend.get_dashboard_limit")
    def test_dashboard_limit_prevents_creation(self, mock_get_dashboard_limit) -> None:
        """Creation is blocked with 400 once the plan's dashboard quota is reached,
        and allowed again when the quota is raised."""
        mock_get_dashboard_limit.return_value = 1
        response = self.do_request("post", self.url, data={"title": "New Dashboard w/ Limit"})
        assert response.status_code == 400
        assert response.data == "You may not exceed 1 dashboards on your current plan."
        mock_get_dashboard_limit.return_value = 5
        response = self.do_request("post", self.url, data={"title": "New Dashboard w/ Limit"})
        assert response.status_code == 201
    @patch("sentry.quotas.backend.get_dashboard_limit")
    def test_dashboard_limit_does_not_count_prebuilt_dashboards(
        self, mock_get_dashboard_limit
    ) -> None:
        """Prebuilt dashboards are excluded from the quota; only user-created
        dashboards count toward the limit."""
        mock_get_dashboard_limit.return_value = 2
        Dashboard.objects.create(
            organization=self.organization,
            title="Prebuilt Dashboard 1",
            created_by_id=None,
            prebuilt_id=1,
        )
        Dashboard.objects.create(
            organization=self.organization,
            title="Prebuilt Dashboard 2",
            created_by_id=None,
            prebuilt_id=2,
        )
        # 2 prebuilt + 2 user dashboards
        response = self.do_request("post", self.url, data={"title": "Dashboard at Limit"})
        assert response.status_code == 400
        assert response.data == "You may not exceed 2 dashboards on your current plan."
        self.dashboard.delete()
        # 2 prebuilt + 1 user dashboard
        response = self.do_request("post", self.url, data={"title": "New Dashboard w/ Prebuilt"})
        assert response.status_code == 201
    def test_prebuilt_dashboard_is_shown_when_favorites_pinned_and_no_dashboards(self) -> None:
        """The General prebuilt dashboard is hidden from the owned filter but shown
        for the shared filter when it is the only candidate."""
        # The prebuilt dashboard should not show up when filtering by owned dashboards
        # because it is not created by the user
        response = self.do_request("get", self.url, {"pin": "favorites", "filter": "owned"})
        assert response.status_code == 200, response.content
        assert len(response.data) == 2
        assert not any(
            dashboard["title"] == "General" and dashboard["id"] == "default-overview"
            for dashboard in response.data
        )
        # If there are no other dashboards when fetching with pinned dashboards
        # the prebuilt dashboard should show up
        response = self.do_request("get", self.url, {"pin": "favorites", "filter": "shared"})
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]["title"] == "General"
    def test_endpoint_creates_prebuilt_dashboards_when_none_exist(self) -> None:
        """Listing with the prebuilt-insights flag lazily creates all PREBUILT_DASHBOARDS
        rows for the org and includes each one in the response."""
        prebuilt_count = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        ).count()
        assert prebuilt_count == 0
        with self.feature("organizations:dashboards-prebuilt-insights-dashboards"):
            response = self.do_request("get", self.url)
            assert response.status_code == 200
        prebuilt_dashboards = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        )
        assert prebuilt_dashboards.count() == len(PREBUILT_DASHBOARDS)
        for prebuilt_dashboard in PREBUILT_DASHBOARDS:
            dashboard = prebuilt_dashboards.get(prebuilt_id=prebuilt_dashboard["prebuilt_id"])
            assert dashboard.title == prebuilt_dashboard["title"]
            assert dashboard.organization == self.organization
            # Prebuilt dashboards are system-owned.
            assert dashboard.created_by_id is None
            assert dashboard.prebuilt_id == prebuilt_dashboard["prebuilt_id"]
            matching_response_data = [
                d
                for d in response.data
                if "prebuiltId" in d and d["prebuiltId"] == prebuilt_dashboard["prebuilt_id"]
            ]
            assert len(matching_response_data) == 1
    def test_endpoint_does_not_create_duplicate_prebuilt_dashboards_when_exist(self) -> None:
        """Listing twice with the flag enabled does not duplicate prebuilt rows."""
        with self.feature("organizations:dashboards-prebuilt-insights-dashboards"):
            response = self.do_request("get", self.url)
            assert response.status_code == 200
        initial_count = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        ).count()
        assert initial_count == len(PREBUILT_DASHBOARDS)
        with self.feature("organizations:dashboards-prebuilt-insights-dashboards"):
            response = self.do_request("get", self.url)
            assert response.status_code == 200
        final_count = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        ).count()
        assert final_count == initial_count
        assert final_count == len(PREBUILT_DASHBOARDS)
    def test_endpoint_deletes_old_prebuilt_dashboards_not_in_list(self) -> None:
        """Prebuilt rows whose ids are no longer in PREBUILT_DASHBOARDS are removed
        during the sync triggered by listing."""
        old_prebuilt_id = 9999  # 9999 is not a valid prebuilt dashboard id
        old_dashboard = Dashboard.objects.create(
            organization=self.organization,
            title="Old Prebuilt Dashboard",
            created_by_id=None,
            prebuilt_id=old_prebuilt_id,
        )
        assert Dashboard.objects.filter(id=old_dashboard.id).exists()
        with self.feature("organizations:dashboards-prebuilt-insights-dashboards"):
            response = self.do_request("get", self.url)
            assert response.status_code == 200
        assert not Dashboard.objects.filter(id=old_dashboard.id).exists()
        prebuilt_dashboards = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        )
        assert prebuilt_dashboards.count() == len(PREBUILT_DASHBOARDS)
    def test_endpoint_does_not_sync_without_feature_flag(self) -> None:
        """Without the prebuilt-insights flag, listing creates no prebuilt rows."""
        prebuilt_count = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        ).count()
        assert prebuilt_count == 0
        response = self.do_request("get", self.url)
        assert response.status_code == 200
        prebuilt_count = Dashboard.objects.filter(
            organization=self.organization, prebuilt_id__isnull=False
        ).count()
        assert prebuilt_count == 0
def test_get_with_prebuilt_ids(self) -> None:
with self.feature("organizations:dashboards-prebuilt-insights-dashboards"):
response = self.do_request(
"get", self.url, {"prebuiltId": [PrebuiltDashboardId.FRONTEND_SESSION_HEALTH]}
)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["prebuiltId"] == PrebuiltDashboardId.FRONTEND_SESSION_HEALTH
| OrganizationDashboardsTest |
python | ray-project__ray | python/ray/serve/tests/test_target_capacity.py | {
"start": 14009,
"end": 14858
} | class ____(BaseModel):
min_replicas: int
initial_replicas: Optional[int]
max_replicas: int
def create_autoscaling_controlled_app(
config: AutoscalingControllerAppConfig,
) -> Application:
min_replicas = config.min_replicas
initial_replicas = config.initial_replicas
max_replicas = config.max_replicas
return ControlledLifecycleDeployment.options(
name="controlled",
autoscaling_config=AutoscalingConfig(
min_replicas=min_replicas,
initial_replicas=initial_replicas,
max_replicas=max_replicas,
target_ongoing_requests=1,
metrics_interval_s=0.1,
look_back_period_s=0.2,
upscale_delay_s=0.1,
downscale_delay_s=0.1,
),
graceful_shutdown_timeout_s=0,
).bind()
| AutoscalingControllerAppConfig |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer_swin.py | {
"start": 1627,
"end": 2564
} | class ____(ModelOutput):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state after a mean pooling operation.
hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*):
A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to
`batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the
`forward` method.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Class for SwinEncoder's outputs.
"""
)
| MaskFormerSwinModelOutputWithPooling |
python | wandb__wandb | wandb/sdk/lib/printer.py | {
"start": 12284,
"end": 15606
} | class ____(Printer):
def __init__(self, *, settings: wandb.Settings | None) -> None:
super().__init__()
self._settings = settings
self._progress = ipython.jupyter_progress_bar()
from IPython import display
self._ipython_display = display
@override
@contextlib.contextmanager
def dynamic_text(self) -> Iterator[DynamicText | None]:
if self._settings and self._settings.silent:
yield None
return
handle = self._ipython_display.display(
self._ipython_display.HTML(""),
display_id=True,
)
if not handle:
yield None
return
try:
yield _DynamicJupyterText(handle)
finally:
handle.update(self._ipython_display.HTML(""))
@override
def display(
self,
text: str | list[str] | tuple[str],
*,
level: str | int | None = None,
) -> None:
if self._settings and self._settings.silent:
return
text = "<br>".join(text) if isinstance(text, (list, tuple)) else text
text = "<br>".join(text.splitlines())
self._ipython_display.display(self._ipython_display.HTML(text))
@property
@override
def supports_html(self) -> bool:
return True
@property
@override
def supports_unicode(self) -> bool:
return True
@override
def code(self, text: str) -> str:
return f"<code>{text}<code>"
@override
def name(self, text: str) -> str:
return f'<strong style="color:#cdcd00">{text}</strong>'
@override
def link(self, link: str, text: str | None = None) -> str:
return f'<a href={link!r} target="_blank">{text or link}</a>'
@override
def emoji(self, name: str) -> str:
return ""
@override
def secondary_text(self, text: str) -> str:
return text
@override
def loading_symbol(self, tick: int) -> str:
return ""
@override
def error(self, text: str) -> str:
return f'<strong style="color:red">{text}</strong>'
@override
def files(self, text: str) -> str:
return f"<code>{text}</code>"
@override
def progress_update(
self,
text: str,
percent_done: float | None = None,
) -> None:
if (self._settings and self._settings.silent) or not self._progress:
return
if percent_done is None:
percent_done = 1.0
self._progress.update(percent_done, text)
@override
def progress_close(self) -> None:
if self._progress:
self._progress.close()
@override
def grid(self, rows: list[list[str]], title: str | None = None) -> str:
format_row = "".join(["<tr>", "<td>{}</td>" * len(rows[0]), "</tr>"])
grid = "".join([format_row.format(*row) for row in rows])
grid = f'<table class="wandb">{grid}</table>'
if title:
return f"<h3>{title}</h3><br/>{grid}<br/>"
return f"{_JUPYTER_TABLE_STYLES}{grid}<br/>"
@override
def panel(self, columns: list[str]) -> str:
row = "".join([f'<div class="wandb-col">{col}</div>' for col in columns])
return f'{_JUPYTER_PANEL_STYLES}<div class="wandb-row">{row}</div>'
| _PrinterJupyter |
python | ansible__ansible | lib/ansible/module_utils/facts/virtual/openbsd.py | {
"start": 2677,
"end": 2785
} | class ____(VirtualCollector):
_fact_class = OpenBSDVirtual
_platform = 'OpenBSD'
| OpenBSDVirtualCollector |
python | ray-project__ray | python/ray/train/v2/_internal/execution/scaling_policy/scaling_policy.py | {
"start": 362,
"end": 421
} | class ____(ScalingDecision):
pass
@dataclass
| NoopDecision |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 53327,
"end": 54104
} | class ____(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
_no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"]
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaGroupedExpertsGemm):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
| AriaTextPreTrainedModel |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 41716,
"end": 43652
} | class ____(ReductionConstantDim):
_defaults = {
"sort": None,
"ascending": False,
"dropna": True,
"normalize": False,
"split_every": None,
"split_out": 1,
"total_length": None,
}
_parameters = [
"frame",
"sort",
"ascending",
"dropna",
"normalize",
"split_every",
"split_out",
"total_length",
]
reduction_chunk = M.value_counts
reduction_aggregate = methods.value_counts_aggregate
reduction_combine = methods.value_counts_combine
split_by = None
@functools.cached_property
def _meta(self):
return self.frame._meta.value_counts(normalize=self.normalize)
@classmethod
def aggregate(cls, inputs, **kwargs):
func = cls.reduction_aggregate or cls.reduction_chunk
if is_scalar(inputs[-1]):
return func(_concat(inputs[:-1]), inputs[-1], observed=True, **kwargs)
else:
return func(_concat(inputs), observed=True, **kwargs)
@property
def shuffle_by_index(self):
return True
@property
def chunk_kwargs(self):
return {"sort": self.sort, "ascending": self.ascending, "dropna": self.dropna}
@property
def aggregate_args(self):
if self.normalize and (self.split_out > 1 or self.split_out is True):
return [self.total_length]
return []
@property
def aggregate_kwargs(self):
return {**self.chunk_kwargs, "normalize": self.normalize}
@property
def combine_kwargs(self):
return self.chunk_kwargs
def _simplify_up(self, parent, dependents):
# We are already a Series
return
def _divisions(self):
if self.sort:
return (None, None)
if self.split_out is True:
return (None,) * (self.frame.npartitions + 1)
return (None,) * (self.split_out + 1)
| ValueCounts |
python | prabhupant__python-ds | data_structures/bst/check_if_bt_if_bst.py | {
"start": 50,
"end": 493
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def check_BST(root, min, max):
if root is None:
return True
if root.val < min or root.val > max:
return False
return (check_BST(root.left, min, root.val - 1) and check_BST(root.right, root.val + 1, max))
root = Node(5)
root.left = Node(4)
root.right = Node(7)
print(check_BST(root, MIN, MAX))
| Node |
python | kamyu104__LeetCode-Solutions | Python/merge-triplets-to-form-target-triplet.py | {
"start": 29,
"end": 424
} | class ____(object):
def mergeTriplets(self, triplets, target):
"""
:type triplets: List[List[int]]
:type target: List[int]
:rtype: bool
"""
result = [0]*3
for t in triplets:
if all(t[i] <= target[i] for i in xrange(3)):
result = [max(result[i], t[i]) for i in xrange(3)]
return result == target
| Solution |
python | walkccc__LeetCode | solutions/1387. Sort Integers by The Power Value/1387.py | {
"start": 0,
"end": 311
} | class ____:
def getKth(self, lo: int, hi: int, k: int) -> int:
return sorted([(self._getPow(i), i) for i in range(lo, hi + 1)])[k - 1][1]
def _getPow(self, n: int) -> int:
if n == 1:
return 0
if n % 2 == 0:
return 1 + self._getPow(n // 2)
return 1 + self._getPow(n * 3 + 1)
| Solution |
python | pdm-project__pdm | src/pdm/exceptions.py | {
"start": 1038,
"end": 1266
} | class ____(PDMWarning):
def __init__(self, project_name: str, extras: list[str]) -> None:
super().__init__(f"Extras not found for {project_name}: [{','.join(extras)}]")
self.extras = tuple(extras)
| ExtrasWarning |
python | mlflow__mlflow | mlflow/genai/optimize/optimizers/gepa_optimizer.py | {
"start": 389,
"end": 9684
} | class ____(BasePromptOptimizer):
"""
A prompt adapter that uses GEPA (Genetic-Pareto) optimization algorithm
to optimize prompts.
GEPA uses iterative mutation, reflection, and Pareto-aware candidate selection
to improve text components like prompts. It leverages large language models to
reflect on system behavior and propose improvements.
Args:
reflection_model: Name of the model to use for reflection and optimization.
Format: "<provider>:/<model>"
(e.g., "openai:/gpt-4o", "anthropic:/claude-3-5-sonnet-20241022").
max_metric_calls: Maximum number of evaluation calls during optimization.
Higher values may lead to better results but increase optimization time.
Default: 100
display_progress_bar: Whether to show a progress bar during optimization.
Default: False
Example:
.. code-block:: python
import mlflow
import openai
from mlflow.genai.optimize.optimizers import GepaPromptOptimizer
prompt = mlflow.genai.register_prompt(
name="qa",
template="Answer the following question: {{question}}",
)
def predict_fn(question: str) -> str:
completion = openai.OpenAI().chat.completions.create(
model="gpt-4o-mini",
messages=[{"role": "user", "content": prompt.format(question=question)}],
)
return completion.choices[0].message.content
dataset = [
{"inputs": {"question": "What is the capital of France?"}, "outputs": "Paris"},
{"inputs": {"question": "What is the capital of Germany?"}, "outputs": "Berlin"},
]
result = mlflow.genai.optimize_prompts(
predict_fn=predict_fn,
train_data=dataset,
prompt_uris=[prompt.uri],
optimizer=GepaPromptOptimizer(
reflection_model="openai:/gpt-4o",
display_progress_bar=True,
),
)
print(result.optimized_prompts[0].template)
"""
def __init__(
self,
reflection_model: str,
max_metric_calls: int = 100,
display_progress_bar: bool = False,
):
self.reflection_model = reflection_model
self.max_metric_calls = max_metric_calls
self.display_progress_bar = display_progress_bar
def optimize(
self,
eval_fn: _EvalFunc,
train_data: list[dict[str, Any]],
target_prompts: dict[str, str],
enable_tracking: bool = True,
) -> PromptOptimizerOutput:
"""
Optimize the target prompts using GEPA algorithm.
Args:
eval_fn: The evaluation function that takes candidate prompts as a dict
(prompt template name -> prompt template) and a dataset as a list of dicts,
and returns a list of EvaluationResultRecord.
train_data: The dataset to use for optimization. Each record should
include the inputs and outputs fields with dict values.
target_prompts: The target prompt templates to use. The key is the prompt template
name and the value is the prompt template.
enable_tracking: If True (default), automatically log optimization progress.
Returns:
The outputs of the prompt optimizer that includes the optimized prompts
as a dict (prompt template name -> prompt template).
"""
from mlflow.metrics.genai.model_utils import _parse_model_uri
try:
import gepa
except ImportError as e:
raise ImportError(
"GEPA is not installed. Please install it with: `pip install gepa`"
) from e
provider, model = _parse_model_uri(self.reflection_model)
class MlflowGEPAAdapter(gepa.GEPAAdapter):
def __init__(self, eval_function, prompts_dict):
self.eval_function = eval_function
self.prompts_dict = prompts_dict
self.prompt_names = list(prompts_dict.keys())
def evaluate(
self,
batch: list[dict[str, Any]],
candidate: dict[str, str],
capture_traces: bool = False,
) -> "gepa.EvaluationBatch":
"""
Evaluate a candidate prompt using the MLflow eval function.
Args:
batch: List of data instances to evaluate
candidate: Proposed text components (prompts)
capture_traces: Whether to capture execution traces
Returns:
EvaluationBatch with outputs, scores, and optional trajectories
"""
eval_results = self.eval_function(candidate, batch)
outputs = [result.outputs for result in eval_results]
scores = [result.score for result in eval_results]
trajectories = eval_results if capture_traces else None
return gepa.EvaluationBatch(
outputs=outputs, scores=scores, trajectories=trajectories
)
def make_reflective_dataset(
self,
candidate: dict[str, str],
eval_batch: "gepa.EvaluationBatch[EvaluationResultRecord, Any]",
components_to_update: list[str],
) -> dict[str, list[dict[str, Any]]]:
"""
Build a reflective dataset for instruction refinement.
Args:
candidate: The evaluated candidate
eval_batch: Result of evaluate with capture_traces=True
components_to_update: Component names to update
Returns:
Dict of reflective dataset per component
"""
reflective_datasets = {}
for component_name in components_to_update:
component_data = []
trajectories = eval_batch.trajectories
for i, (trajectory, score) in enumerate(zip(trajectories, eval_batch.scores)):
trace = trajectory.trace
spans = []
if trace:
spans = [
{
"name": span.name,
"inputs": span.inputs,
"outputs": span.outputs,
}
for span in trace.data.spans
]
component_data.append(
{
"component_name": component_name,
"current_text": candidate.get(component_name, ""),
"trace": spans,
"score": score,
"inputs": trajectory.inputs,
"outputs": trajectory.outputs,
"expectations": trajectory.expectations,
"rationales": trajectory.rationales,
"index": i,
}
)
reflective_datasets[component_name] = component_data
return reflective_datasets
adapter = MlflowGEPAAdapter(eval_fn, target_prompts)
kwargs = {
"seed_candidate": target_prompts,
"trainset": train_data,
"adapter": adapter,
"reflection_lm": f"{provider}/{model}",
"max_metric_calls": self.max_metric_calls,
"display_progress_bar": self.display_progress_bar,
"use_mlflow": enable_tracking,
}
if Version(importlib.metadata.version("gepa")) < Version("0.0.18"):
kwargs.pop("use_mlflow")
gepa_result = gepa.optimize(**kwargs)
optimized_prompts = gepa_result.best_candidate
initial_score, final_score = self._extract_eval_scores(gepa_result)
return PromptOptimizerOutput(
optimized_prompts=optimized_prompts,
initial_eval_score=initial_score,
final_eval_score=final_score,
)
def _extract_eval_scores(self, result: "gepa.GEPAResult") -> tuple[float | None, float | None]:
"""
Extract initial and final evaluation scores from GEPA result.
Args:
result: GEPA optimization result
Returns:
Tuple of (initial_score, final_score), both can be None if unavailable
"""
final_score = None
initial_score = None
scores = result.val_aggregate_scores
if scores and len(scores) > 0:
# The first score is the initial baseline score
initial_score = scores[0]
# The highest score is the final optimized score
final_score = max(scores)
return initial_score, final_score
| GepaPromptOptimizer |
python | geekcomputers__Python | game_of_life/05_mixed_sorting.py | {
"start": 1277,
"end": 1593
} | class ____(unittest.TestCase):
def test_1(self):
self.assertEqual(mixed_sorting([8, 13, 11, 90, -5, 4]), [4, 13, 11, 8, -5, 90])
def test_2(self):
self.assertEqual(mixed_sorting([1, 2, 3, 6, 5, 4]), [5, 2, 3, 4, 1, 6])
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestMixedSorting |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 56816,
"end": 57880
} | class ____(ASTOperator):
def __init__(self, identifier: ASTIdentifier) -> None:
self.identifier = identifier
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTOperatorLiteral):
return NotImplemented
return self.identifier == other.identifier
def __hash__(self) -> int:
return hash(self.identifier)
def get_id(self, version: int) -> str:
if version == 1:
raise NoOldIdError
return 'li' + self.identifier.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return 'operator""' + transform(self.identifier)
def _describe_identifier(
self,
signode: TextElement,
identnode: TextElement,
env: BuildEnvironment,
symbol: Symbol,
) -> None:
signode += addnodes.desc_sig_keyword('operator', 'operator')
signode += addnodes.desc_sig_literal_string('""', '""')
self.identifier.describe_signature(identnode, 'markType', env, '', '', symbol)
| ASTOperatorLiteral |
python | keras-team__keras | keras/src/trainers/data_adapters/py_dataset_adapter.py | {
"start": 18965,
"end": 23990
} | class ____(PyDatasetEnqueuer):
"""Builds a Enqueuer from a PyDataset.
Args:
py_dataset: A `keras.utils.PyDataset` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(
self,
py_dataset,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
shuffle=False,
):
super().__init__(
py_dataset, workers, use_multiprocessing, max_queue_size
)
self.shuffle = shuffle
if self.py_dataset.num_batches is None:
# For infinite datasets, `self.indices` is created here once for all
# so that subsequent runs resume from where they stopped.
self.indices = itertools.count()
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers,
initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()),
)
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects.
This method is the run method of worker threads.
"""
try:
if self.py_dataset.num_batches is not None:
# For finite datasets, `self.indices` is created here so that
# shuffling creates different a order each time.
indices = range(self.py_dataset.num_batches)
if self.shuffle:
indices = list(indices)
random.shuffle(indices)
self.indices = iter(indices)
self._send_py_dataset() # Share the initial py_dataset
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while self.is_running():
try:
i = next(self.indices)
self.future_queue.put(
executor.apply_async(get_index, (self.uid, i)),
block=True,
)
except StopIteration:
break
except Exception as e:
self.future_queue.put(e) # Report exception
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
This method is called from the main thread.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.ready_queue.get(block=False)
yield inputs
continue # Retry the ready_queue
except queue.Empty:
pass
try:
value = self.future_queue.get(block=True, timeout=5)
self.future_queue.task_done()
if isinstance(value, Exception):
raise value # Propagate exception from other thread
inputs = value.get()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception as e:
self.stop(drain_queue_and_join=True)
raise e
# Note that it is ok to poll the iterator after the initial `start`,
# which may happen before the first `on_epoch_begin`. But it's not ok to
# poll after `on_epoch_end`.
raise ValueError(
"Iterator called after `on_epoch_end` or before `on_epoch_begin`."
)
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids.
This is used to indicate that a worker process
was created by Keras.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is
# helpful when diagnosing orphaned processes.
worker_proc.name = f"Keras_worker_{worker_proc.name}"
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
| OrderedEnqueuer |
python | PyCQA__bandit | bandit/plugins/django_xss.py | {
"start": 178,
"end": 10302
} | class ____:
def __init__(self, var_name, ignore_nodes=None):
self.var_name = var_name
self.ignore_nodes = ignore_nodes
def is_assigned_in(self, items):
assigned = []
for ast_inst in items:
new_assigned = self.is_assigned(ast_inst)
if new_assigned:
if isinstance(new_assigned, (list, tuple)):
assigned.extend(new_assigned)
else:
assigned.append(new_assigned)
return assigned
def is_assigned(self, node):
assigned = False
if self.ignore_nodes:
if isinstance(self.ignore_nodes, (list, tuple, object)):
if isinstance(node, self.ignore_nodes):
return assigned
if isinstance(node, ast.Expr):
assigned = self.is_assigned(node.value)
elif isinstance(node, ast.FunctionDef):
for name in node.args.args:
if isinstance(name, ast.Name):
if name.id == self.var_name.id:
# If is param the assignations are not affected
return assigned
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.With):
for withitem in node.items:
var_id = getattr(withitem.optional_vars, "id", None)
if var_id == self.var_name.id:
assigned = node
else:
assigned = self.is_assigned_in(node.body)
elif isinstance(node, ast.Try):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.handlers))
assigned.extend(self.is_assigned_in(node.orelse))
assigned.extend(self.is_assigned_in(node.finalbody))
elif isinstance(node, ast.ExceptHandler):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
elif isinstance(node, (ast.If, ast.For, ast.While)):
assigned = []
assigned.extend(self.is_assigned_in(node.body))
assigned.extend(self.is_assigned_in(node.orelse))
elif isinstance(node, ast.AugAssign):
if isinstance(node.target, ast.Name):
if node.target.id == self.var_name.id:
assigned = node.value
elif isinstance(node, ast.Assign) and node.targets:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == self.var_name.id:
assigned = node.value
elif isinstance(target, ast.Tuple) and isinstance(
node.value, ast.Tuple
):
pos = 0
for name in target.elts:
if name.id == self.var_name.id:
assigned = node.value.elts[pos]
break
pos += 1
return assigned
def evaluate_var(xss_var, parent, until, ignore_nodes=None):
secure = False
if isinstance(xss_var, ast.Name):
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
if name.arg == xss_var.id:
return False # Params are not secure
analyser = DeepAssignation(xss_var, ignore_nodes)
for node in parent.body:
if node.lineno >= until:
break
to = analyser.is_assigned(node)
if to:
if isinstance(to, ast.Constant) and isinstance(to.value, str):
secure = True
elif isinstance(to, ast.Name):
secure = evaluate_var(to, parent, to.lineno, ignore_nodes)
elif isinstance(to, ast.Call):
secure = evaluate_call(to, parent, ignore_nodes)
elif isinstance(to, (list, tuple)):
num_secure = 0
for some_to in to:
if isinstance(some_to, ast.Constant) and isinstance(
some_to.value, str
):
num_secure += 1
elif isinstance(some_to, ast.Name):
if evaluate_var(
some_to, parent, node.lineno, ignore_nodes
):
num_secure += 1
else:
break
else:
break
if num_secure == len(to):
secure = True
else:
secure = False
break
else:
secure = False
break
return secure
def evaluate_call(call, parent, ignore_nodes=None):
secure = False
evaluate = False
if isinstance(call, ast.Call) and isinstance(call.func, ast.Attribute):
if (
isinstance(call.func.value, ast.Constant)
and call.func.attr == "format"
):
evaluate = True
if call.keywords:
evaluate = False # TODO(??) get support for this
if evaluate:
args = list(call.args)
num_secure = 0
for arg in args:
if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
num_secure += 1
elif isinstance(arg, ast.Name):
if evaluate_var(arg, parent, call.lineno, ignore_nodes):
num_secure += 1
else:
break
elif isinstance(arg, ast.Call):
if evaluate_call(arg, parent, ignore_nodes):
num_secure += 1
else:
break
elif isinstance(arg, ast.Starred) and isinstance(
arg.value, (ast.List, ast.Tuple)
):
args.extend(arg.value.elts)
num_secure += 1
else:
break
secure = num_secure == len(args)
return secure
def transform2call(var):
if isinstance(var, ast.BinOp):
is_mod = isinstance(var.op, ast.Mod)
is_left_str = isinstance(var.left, ast.Constant) and isinstance(
var.left.value, str
)
if is_mod and is_left_str:
new_call = ast.Call()
new_call.args = []
new_call.args = []
new_call.keywords = None
new_call.lineno = var.lineno
new_call.func = ast.Attribute()
new_call.func.value = var.left
new_call.func.attr = "format"
if isinstance(var.right, ast.Tuple):
new_call.args = var.right.elts
else:
new_call.args = [var.right]
return new_call
def check_risk(node):
description = "Potential XSS on mark_safe function."
xss_var = node.args[0]
secure = False
if isinstance(xss_var, ast.Name):
# Check if the var are secure
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
is_param = False
if isinstance(parent, ast.FunctionDef):
for name in parent.args.args:
if name.arg == xss_var.id:
is_param = True
break
if not is_param:
secure = evaluate_var(xss_var, parent, node.lineno)
elif isinstance(xss_var, ast.Call):
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
secure = evaluate_call(xss_var, parent)
elif isinstance(xss_var, ast.BinOp):
is_mod = isinstance(xss_var.op, ast.Mod)
is_left_str = isinstance(xss_var.left, ast.Constant) and isinstance(
xss_var.left.value, str
)
if is_mod and is_left_str:
parent = node._bandit_parent
while not isinstance(parent, (ast.Module, ast.FunctionDef)):
parent = parent._bandit_parent
new_call = transform2call(xss_var)
secure = evaluate_call(new_call, parent)
if not secure:
return bandit.Issue(
severity=bandit.MEDIUM,
confidence=bandit.HIGH,
cwe=issue.Cwe.BASIC_XSS,
text=description,
)
@test.checks("Call")
@test.test_id("B703")
def django_mark_safe(context):
"""**B703: Potential XSS on mark_safe function**
:Example:
.. code-block:: none
>> Issue: [B703:django_mark_safe] Potential XSS on mark_safe function.
Severity: Medium Confidence: High
CWE: CWE-80 (https://cwe.mitre.org/data/definitions/80.html)
Location: examples/mark_safe_insecure.py:159:4
More Info: https://bandit.readthedocs.io/en/latest/plugins/b703_django_mark_safe.html
158 str_arg = 'could be insecure'
159 safestring.mark_safe(str_arg)
.. seealso::
- https://docs.djangoproject.com/en/dev/topics/security/\
#cross-site-scripting-xss-protection
- https://docs.djangoproject.com/en/dev/ref/utils/\
#module-django.utils.safestring
- https://docs.djangoproject.com/en/dev/ref/utils/\
#django.utils.html.format_html
- https://cwe.mitre.org/data/definitions/80.html
.. versionadded:: 1.5.0
.. versionchanged:: 1.7.3
CWE information added
""" # noqa: E501
if context.is_module_imported_like("django.utils.safestring"):
affected_functions = [
"mark_safe",
"SafeText",
"SafeUnicode",
"SafeString",
"SafeBytes",
]
if context.call_function_name in affected_functions:
xss = context.node.args[0]
if not (
isinstance(xss, ast.Constant) and isinstance(xss.value, str)
):
return check_risk(context.node)
| DeepAssignation |
python | doocs__leetcode | solution/3200-3299/3281.Maximize Score of Numbers in Ranges/Solution.py | {
"start": 0,
"end": 543
} | class ____:
def maxPossibleScore(self, start: List[int], d: int) -> int:
def check(mi: int) -> bool:
last = -inf
for st in start:
if last + mi > st + d:
return False
last = max(st, last + mi)
return True
start.sort()
l, r = 0, start[-1] + d - start[0]
while l < r:
mid = (l + r + 1) >> 1
if check(mid):
l = mid
else:
r = mid - 1
return l
| Solution |
python | huggingface__transformers | src/transformers/models/tvp/image_processing_tvp.py | {
"start": 3115,
"end": 23148
} | class ____(BaseImageProcessor):
r"""
Constructs a Tvp image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 448}`):
Size of the output image after resizing. The longest edge of the image will be resized to
`size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
`size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
parameter in the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
pad_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
`preprocess` method.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
Use what kind of mode in padding.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = TvpImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_pad: bool = True,
pad_size: Optional[dict[str, int]] = None,
constant_values: Union[float, Iterable[float]] = 0,
pad_mode: PaddingMode = PaddingMode.CONSTANT,
do_normalize: bool = True,
do_flip_channel_order: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"longest_edge": 448}
crop_size = crop_size if crop_size is not None else {"height": 448, "width": 448}
pad_size = pad_size if pad_size is not None else {"height": 448, "width": 448}
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.pad_size = pad_size
self.constant_values = constant_values
self.pad_mode = pad_mode
self.do_normalize = do_normalize
self.do_flip_channel_order = do_flip_channel_order
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its
longest edge of length `s` while keeping the aspect ratio of the original image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
if "height" in size and "width" in size:
output_size = (size["height"], size["width"])
elif "longest_edge" in size:
output_size = get_resize_output_image_size(image, size["longest_edge"], input_data_format)
else:
raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}")
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def pad_image(
self,
image: np.ndarray,
pad_size: Optional[dict[str, int]] = None,
constant_values: Union[float, Iterable[float]] = 0,
pad_mode: PaddingMode = PaddingMode.CONSTANT,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Pad an image with zeros to the given size.
Args:
image (`np.ndarray`):
Image to pad.
pad_size (`dict[str, int]`)
Size of the output image with pad.
constant_values (`Union[float, Iterable[float]]`)
The fill value to use when padding the image.
pad_mode (`PaddingMode`)
The pad mode, default to PaddingMode.CONSTANT
data_format (`ChannelDimension` or `str`, *optional*)
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
max_height = pad_size.get("height", height)
max_width = pad_size.get("width", width)
pad_right, pad_bottom = max_width - width, max_height - height
if pad_right < 0 or pad_bottom < 0:
raise ValueError("The padding size must be greater than image size")
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=pad_mode,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_pad: bool = True,
pad_size: Optional[dict[str, int]] = None,
constant_values: Optional[Union[float, Iterable[float]]] = None,
pad_mode: Optional[PaddingMode] = None,
do_normalize: Optional[bool] = None,
do_flip_channel_order: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""Preprocesses a single image."""
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format
)
if do_pad:
image = self.pad_image(
image=image,
pad_size=pad_size,
constant_values=constant_values,
pad_mode=pad_mode,
input_data_format=input_data_format,
)
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
image = flip_channel_order(image=image, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
@filter_out_non_signature_kwargs()
def preprocess(
self,
videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]],
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_pad: Optional[bool] = None,
pad_size: Optional[dict[str, int]] = None,
constant_values: Optional[Union[float, Iterable[float]]] = None,
pad_mode: Optional[PaddingMode] = None,
do_normalize: Optional[bool] = None,
do_flip_channel_order: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
videos (`ImageInput` or `list[ImageInput]` or `list[list[ImageInput]]`):
Frames to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after applying resize.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`):
Whether to centre crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after applying the centre crop.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
pad_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
`preprocess` method.
constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
The fill value to use when padding the image.
pad_mode (`PaddingMode`, *optional*, defaults to "PaddingMode.CONSTANT"):
Use what kind of mode in padding.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the inferred channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
pad_size = pad_size if pad_size is not None else self.pad_size
constant_values = constant_values if constant_values is not None else self.constant_values
pad_mode = pad_mode if pad_mode else self.pad_mode
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_flip_channel_order = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
if not valid_images(videos):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
videos = make_batched(videos)
videos = [
np.array(
[
self._preprocess_image(
image=img,
do_resize=do_resize,
size=size,
resample=resample,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_pad=do_pad,
pad_size=pad_size,
constant_values=constant_values,
pad_mode=pad_mode,
do_normalize=do_normalize,
do_flip_channel_order=do_flip_channel_order,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for img in video
]
)
for video in videos
]
data = {"pixel_values": videos}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["TvpImageProcessor"]
| TvpImageProcessor |
python | RaRe-Technologies__gensim | gensim/test/test_text_analysis.py | {
"start": 3735,
"end": 4952
} | class ____(BaseTestCases.TextAnalyzerTestBase):
accumulator_cls = InvertedIndexAccumulator
def test_accumulate1(self):
accumulator = InvertedIndexAccumulator(self.top_ids, self.dictionary)\
.accumulate(self.texts, 2)
# [['this', 'is'], ['is', 'a'], ['test', 'document'], ['this', 'test'],
# ['test', 'document'], ['test', 'test'], ['test', 'this']]
inverted_index = accumulator.index_to_dict()
expected = {
10: {0, 3, 6},
15: {0, 1},
20: {1},
21: {2, 3, 4, 5, 6},
17: {2, 4}
}
self.assertDictEqual(expected, inverted_index)
def test_accumulate2(self):
accumulator = InvertedIndexAccumulator(self.top_ids, self.dictionary)\
.accumulate(self.texts, 3)
# [['this', 'is', 'a'], ['test', 'document'], ['this', 'test', 'document'],
# ['test', 'test', 'this']
inverted_index = accumulator.index_to_dict()
expected = {
10: {0, 2, 3},
15: {0},
20: {0},
21: {1, 2, 3},
17: {1, 2}
}
self.assertDictEqual(expected, inverted_index)
| TestInvertedIndexAccumulator |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/digits.py | {
"start": 80,
"end": 789
} | class ____(App):
CSS = """
.left {
text-align: left;
}
.center {
text-align:center;
}
.right {
text-align:right;
}
.bold {
text-style: bold;
}
"""
def compose(self) -> ComposeResult:
yield Digits("3.14159265359", classes="left")
yield Digits(" 0123456789+-.,ABCDEF", classes="center")
yield Digits(" 0123456789+-.,ABCDEF", classes="center bold")
yield Digits("3x10^4", classes="right")
yield Digits("3x10^4", classes="right")
yield Digits("($123.45)")
yield Digits("£123.45")
yield Digits("€123.45")
if __name__ == "__main__":
app = DigitApp()
app.run()
| DigitApp |
python | python__mypy | mypyc/irbuild/context.py | {
"start": 4005,
"end": 5694
} | class ____:
"""Contains information regarding implicitly generated classes.
Implicit classes are generated for nested functions and generator
functions. They are not explicitly defined in the source code.
NOTE: This is both a concrete class and used as a base class.
"""
def __init__(self, ir: ClassIR) -> None:
# The ClassIR instance associated with this class.
self.ir = ir
# The register associated with the 'self' instance for this generator class.
self._self_reg: Value | None = None
# Environment class registers are the local registers associated with instances of an
# environment class, used for getting and setting attributes. curr_env_reg is the register
# associated with the current environment. prev_env_reg is the self.__mypyc_env__ field
# associated with the previous environment.
self._curr_env_reg: Value | None = None
self._prev_env_reg: Value | None = None
@property
def self_reg(self) -> Value:
assert self._self_reg is not None
return self._self_reg
@self_reg.setter
def self_reg(self, reg: Value) -> None:
self._self_reg = reg
@property
def curr_env_reg(self) -> Value:
assert self._curr_env_reg is not None
return self._curr_env_reg
@curr_env_reg.setter
def curr_env_reg(self, reg: Value) -> None:
self._curr_env_reg = reg
@property
def prev_env_reg(self) -> Value:
assert self._prev_env_reg is not None
return self._prev_env_reg
@prev_env_reg.setter
def prev_env_reg(self, reg: Value) -> None:
self._prev_env_reg = reg
| ImplicitClass |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_arithmetic.py | {
"start": 310,
"end": 10855
} | class ____:
def test_overflow_offset(self):
# no overflow expected
stamp = Timestamp("2000/1/1")
offset_no_overflow = to_offset("D") * 100
expected = Timestamp("2000/04/10")
assert stamp + offset_no_overflow == expected
assert offset_no_overflow + stamp == expected
expected = Timestamp("1999/09/23")
assert stamp - offset_no_overflow == expected
def test_overflow_offset_raises(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
# ends up multiplying really large numbers which overflow
stamp = Timestamp("2017-01-13 00:00:00").as_unit("ns")
offset_overflow = 20169940 * offsets.Day(1)
lmsg2 = r"Cannot cast -?20169940 days \+?00:00:00 to unit='ns' without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=lmsg2):
stamp + offset_overflow
with pytest.raises(OutOfBoundsTimedelta, match=lmsg2):
offset_overflow + stamp
with pytest.raises(OutOfBoundsTimedelta, match=lmsg2):
stamp - offset_overflow
# xref https://github.com/pandas-dev/pandas/issues/14080
# used to crash, so check for proper overflow exception
stamp = Timestamp("2000/1/1").as_unit("ns")
offset_overflow = to_offset("D") * 100**5
lmsg3 = (
r"Cannot cast -?10000000000 days \+?00:00:00 to unit='ns' without overflow"
)
with pytest.raises(OutOfBoundsTimedelta, match=lmsg3):
stamp + offset_overflow
with pytest.raises(OutOfBoundsTimedelta, match=lmsg3):
offset_overflow + stamp
with pytest.raises(OutOfBoundsTimedelta, match=lmsg3):
stamp - offset_overflow
def test_overflow_timestamp_raises(self):
# https://github.com/pandas-dev/pandas/issues/31774
msg = "Result is too large"
a = Timestamp("2101-01-01 00:00:00").as_unit("ns")
b = Timestamp("1688-01-01 00:00:00").as_unit("ns")
with pytest.raises(OutOfBoundsDatetime, match=msg):
a - b
# but we're OK for timestamp and datetime.datetime
assert (a - b.to_pydatetime()) == (a.to_pydatetime() - b)
def test_delta_preserve_nanos(self):
val = Timestamp(1337299200000000123)
result = val + timedelta(1)
assert result.nanosecond == val.nanosecond
def test_rsub_dtscalars(self, tz_naive_fixture):
# In particular, check that datetime64 - Timestamp works GH#28286
td = Timedelta(1235345642000)
ts = Timestamp("2021-01-01", tz=tz_naive_fixture)
other = ts + td
assert other - ts == td
assert other.to_pydatetime() - ts == td
if tz_naive_fixture is None:
assert other.to_datetime64() - ts == td
else:
msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
other.to_datetime64() - ts
def test_timestamp_sub_datetime(self):
dt = datetime(2013, 10, 12)
ts = Timestamp(datetime(2013, 10, 13))
assert (ts - dt).days == 1
assert (dt - ts).days == -1
def test_subtract_tzaware_datetime(self):
t1 = Timestamp("2020-10-22T22:00:00+00:00")
t2 = datetime(2020, 10, 22, 22, tzinfo=timezone.utc)
result = t1 - t2
assert isinstance(result, Timedelta)
assert result == Timedelta("0 days")
def test_subtract_timestamp_from_different_timezone(self):
t1 = Timestamp("20130101").tz_localize("US/Eastern")
t2 = Timestamp("20130101").tz_localize("CET")
result = t1 - t2
assert isinstance(result, Timedelta)
assert result == Timedelta("0 days 06:00:00")
def test_subtracting_involving_datetime_with_different_tz(self):
t1 = datetime(2013, 1, 1, tzinfo=timezone(timedelta(hours=-5)))
t2 = Timestamp("20130101").tz_localize("CET")
result = t1 - t2
assert isinstance(result, Timedelta)
assert result == Timedelta("0 days 06:00:00")
result = t2 - t1
assert isinstance(result, Timedelta)
assert result == Timedelta("-1 days +18:00:00")
def test_subtracting_different_timezones(self, tz_aware_fixture):
t_raw = Timestamp("20130101")
t_UTC = t_raw.tz_localize("UTC")
t_diff = t_UTC.tz_convert(tz_aware_fixture) + Timedelta("0 days 05:00:00")
result = t_diff - t_UTC
assert isinstance(result, Timedelta)
assert result == Timedelta("0 days 05:00:00")
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
dt = datetime(2014, 3, 4)
td = timedelta(seconds=1)
ts = Timestamp(dt)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
# GH#22535 add/sub with integers is deprecated
ts + 1
with pytest.raises(TypeError, match=msg):
ts - 1
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta more tests in tseries/base/tests/test_base.py
assert type(ts - dt) == Timedelta
assert type(ts + td) == Timestamp
assert type(ts - td) == Timestamp
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
td64 = np.timedelta64(1, "D")
assert type(ts + td64) == Timestamp
assert type(ts - td64) == Timestamp
@pytest.mark.parametrize(
"td", [Timedelta(hours=3), np.timedelta64(3, "h"), timedelta(hours=3)]
)
def test_radd_tdscalar(self, td, fixed_now_ts):
# GH#24775 timedelta64+Timestamp should not raise
ts = fixed_now_ts
assert td + ts == ts + td
@pytest.mark.parametrize(
"other,expected_difference",
[
(np.timedelta64(-123, "ns"), -123),
(np.timedelta64(1234567898, "ns"), 1234567898),
(np.timedelta64(-123, "us"), -123000),
(np.timedelta64(-123, "ms"), -123000000),
],
)
def test_timestamp_add_timedelta64_unit(self, other, expected_difference):
now = datetime.now(timezone.utc)
ts = Timestamp(now).as_unit("ns")
result = ts + other
valdiff = result._value - ts._value
assert valdiff == expected_difference
ts2 = Timestamp(now)
assert ts2 + other == result
@pytest.mark.parametrize(
"ts",
[
Timestamp("1776-07-04"),
Timestamp("1776-07-04", tz="UTC"),
],
)
@pytest.mark.parametrize(
"other",
[
1,
np.int64(1),
np.array([1, 2], dtype=np.int32),
np.array([3, 4], dtype=np.uint64),
],
)
def test_add_int_with_freq(self, ts, other):
msg = "Addition/subtraction of integers and integer-arrays"
with pytest.raises(TypeError, match=msg):
ts + other
with pytest.raises(TypeError, match=msg):
other + ts
with pytest.raises(TypeError, match=msg):
ts - other
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
other - ts
@pytest.mark.parametrize("shape", [(6,), (2, 3)])
def test_addsub_m8ndarray(self, shape):
# GH#33296
ts = Timestamp("2020-04-04 15:45").as_unit("ns")
other = np.arange(6).astype("m8[h]").reshape(shape)
result = ts + other
ex_stamps = [ts + Timedelta(hours=n) for n in range(6)]
expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape)
tm.assert_numpy_array_equal(result, expected)
result = other + ts
tm.assert_numpy_array_equal(result, expected)
result = ts - other
ex_stamps = [ts - Timedelta(hours=n) for n in range(6)]
expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape)
tm.assert_numpy_array_equal(result, expected)
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
other - ts
@pytest.mark.parametrize("shape", [(6,), (2, 3)])
def test_addsub_m8ndarray_tzaware(self, shape):
# GH#33296
ts = Timestamp("2020-04-04 15:45", tz="US/Pacific")
other = np.arange(6).astype("m8[h]").reshape(shape)
result = ts + other
ex_stamps = [ts + Timedelta(hours=n) for n in range(6)]
expected = np.array(ex_stamps).reshape(shape)
tm.assert_numpy_array_equal(result, expected)
result = other + ts
tm.assert_numpy_array_equal(result, expected)
result = ts - other
ex_stamps = [ts - Timedelta(hours=n) for n in range(6)]
expected = np.array(ex_stamps).reshape(shape)
tm.assert_numpy_array_equal(result, expected)
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
other - ts
def test_subtract_different_utc_objects(self, utc_fixture, utc_fixture2):
# GH 32619
dt = datetime(2021, 1, 1)
ts1 = Timestamp(dt, tz=utc_fixture)
ts2 = Timestamp(dt, tz=utc_fixture2)
result = ts1 - ts2
expected = Timedelta(0)
assert result == expected
@pytest.mark.parametrize(
"tz",
[
"pytz/US/Eastern",
gettz("US/Eastern"),
"US/Eastern",
"dateutil/US/Eastern",
],
)
def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
# GH#1389
if isinstance(tz, str) and tz.startswith("pytz/"):
pytz = pytest.importorskip("pytz")
tz = pytz.timezone(tz.removeprefix("pytz/"))
# 4 hours before DST transition
stamp = Timestamp("3/10/2012 22:00", tz=tz)
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp("3/11/2012 05:00", tz=tz)
assert result == expected
def test_timestamp_dst_transition(self):
# GH 60084
dt_str = "2023-11-05 01:00-08:00"
tz_str = "America/Los_Angeles"
ts1 = Timestamp(dt_str, tz=tz_str)
ts2 = ts1 + Timedelta(hours=0)
assert ts1 == ts2
assert hash(ts1) == hash(ts2)
| TestTimestampArithmetic |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 25670,
"end": 28129
} | class ____:
param_names = ["shape", "dtype", "index_structure"]
params = [
get_benchmark_shapes("TimeIndexingNumericSeries"),
(np.int64, np.uint64, np.float64),
("unique_monotonic_inc", "nonunique_monotonic_inc"),
]
def setup(self, shape, dtype, index_structure):
N = shape[0]
indices = {
"unique_monotonic_inc": IMPL.Index(range(N), dtype=dtype),
"nonunique_monotonic_inc": IMPL.Index(
list(range(N // 100)) + [(N // 100) - 1] + list(range(N // 100, N - 1)),
dtype=dtype,
),
}
self.data = IMPL.Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(N // 2)
self.index_to_query = N // 2
self.array_list = self.array.tolist()
execute(self.data)
def time_getitem_scalar(self, shape, index, index_structure):
# not calling execute as execute function fails for scalar
self.data[self.index_to_query]
def time_getitem_slice(self, shape, index, index_structure):
execute(self.data[: self.index_to_query])
def time_getitem_list_like(self, shape, index, index_structure):
execute(self.data[[self.index_to_query]])
def time_getitem_array(self, shape, index, index_structure):
execute(self.data[self.array])
def time_getitem_lists(self, shape, index, index_structure):
execute(self.data[self.array_list])
def time_iloc_array(self, shape, index, index_structure):
execute(self.data.iloc[self.array])
def time_iloc_list_like(self, shape, index, index_structure):
execute(self.data.iloc[[self.index_to_query]])
def time_iloc_scalar(self, shape, index, index_structure):
# not calling execute as execute function fails for scalar
self.data.iloc[self.index_to_query]
def time_iloc_slice(self, shape, index, index_structure):
execute(self.data.iloc[: self.index_to_query])
def time_loc_array(self, shape, index, index_structure):
execute(self.data.loc[self.array])
def time_loc_list_like(self, shape, index, index_structure):
execute(self.data.loc[[self.index_to_query]])
def time_loc_scalar(self, shape, index, index_structure):
self.data.loc[self.index_to_query]
def time_loc_slice(self, shape, index, index_structure):
execute(self.data.loc[: self.index_to_query])
| TimeIndexingNumericSeries |
python | lazyprogrammer__machine_learning_examples | svm_class/kernel_svm_gradient_primal.py | {
"start": 1194,
"end": 5976
} | class ____:
def __init__(self, kernel=linear, C=1.0):
self.C = C
self.kernel = kernel
def _objective(self, margins):
return 0.5 * self.u.dot(self.K.dot(self.u)) + \
self.C * np.maximum(0, 1 - margins).sum()
def fit(self, X, Y, lr=1e-5, n_iters=400):
N, D = X.shape
self.N = N
self.u = np.random.randn(N)
self.b = 0
# setup kernel matrix
self.X = X
self.Y = Y
self.K = self.kernel(X, X)
# gradient descent
losses = []
for _ in range(n_iters):
margins = Y * (self.u.dot(self.K) + self.b)
loss = self._objective(margins)
losses.append(loss)
idx = np.where(margins < 1)[0]
grad_u = self.K.dot(self.u) - self.C * Y[idx].dot(self.K[idx])
self.u -= lr * grad_u
grad_b = -self.C * Y[idx].sum()
self.b -= lr * grad_b
self.support_ = np.where((Y * (self.u.dot(self.K) + self.b)) <= 1)[0]
print("num SVs:", len(self.support_))
# print("w:", self.w)
# print("b:", self.b)
# hist of margins
m = Y * (self.u.dot(self.K) + self.b)
plt.hist(m, bins=20)
plt.show()
plt.plot(losses)
plt.title("loss per iteration")
plt.show()
def _decision_function(self, X):
return self.u.dot(self.kernel(self.X, X)) + self.b
def predict(self, X):
return np.sign(self._decision_function(X))
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
def plot_decision_boundary(model, X, Y, resolution=100, colors=('b', 'k', 'r')):
np.warnings.filterwarnings('ignore')
fig, ax = plt.subplots()
# Generate coordinate grid of shape [resolution x resolution]
# and evaluate the model over the entire space
x_range = np.linspace(X[:,0].min(), X[:,0].max(), resolution)
y_range = np.linspace(X[:,1].min(), X[:,1].max(), resolution)
grid = [[model._decision_function(np.array([[xr, yr]])) for yr in y_range] for xr in x_range]
grid = np.array(grid).reshape(len(x_range), len(y_range))
# Plot decision contours using grid and
# make a scatter plot of training data
ax.contour(x_range, y_range, grid.T, (-1, 0, 1), linewidths=(1, 1, 1),
linestyles=('--', '-', '--'), colors=colors)
ax.scatter(X[:,0], X[:,1],
c=Y, lw=0, alpha=0.3, cmap='seismic')
# Plot support vectors (non-zero alphas)
# as circled points (linewidth > 0)
mask = model.support_
ax.scatter(X[:,0][mask], X[:,1][mask],
c=Y[mask], cmap='seismic')
# debug
ax.scatter([0], [0], c='black', marker='x')
# debug
# x_axis = np.linspace(X[:,0].min(), X[:,0].max(), 100)
# w = model.w
# b = model.b
# # w[0]*x + w[1]*y + b = 0
# y_axis = -(w[0]*x_axis + b)/w[1]
# plt.plot(x_axis, y_axis, color='purple')
# margin_p = (1 - w[0]*x_axis - b)/w[1]
# plt.plot(x_axis, margin_p, color='orange')
# margin_n = -(1 + w[0]*x_axis + b)/w[1]
# plt.plot(x_axis, margin_n, color='orange')
plt.show()
def clouds():
X, Y = get_clouds()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-5, 500
def medical():
data = load_breast_cancer()
X, Y = data.data, data.target
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
return Xtrain, Xtest, Ytrain, Ytest, linear, 1e-3, 200
def xor():
X, Y = get_xor()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=3.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 500
def donut():
X, Y = get_donut()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=1.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 300
def spiral():
X, Y = get_spiral()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.33)
kernel = lambda X1, X2: rbf(X1, X2, gamma=5.)
return Xtrain, Xtest, Ytrain, Ytest, kernel, 1e-3, 500
if __name__ == '__main__':
Xtrain, Xtest, Ytrain, Ytest, kernel, lr, n_iters = donut()
print("Possible labels:", set(Ytrain))
# make sure the targets are (-1, +1)
Ytrain[Ytrain == 0] = -1
Ytest[Ytest == 0] = -1
# scale the data
scaler = StandardScaler()
Xtrain = scaler.fit_transform(Xtrain)
Xtest = scaler.transform(Xtest)
# now we'll use our custom implementation
model = KernelSVM(kernel=kernel, C=1.0)
t0 = datetime.now()
model.fit(Xtrain, Ytrain, lr=lr, n_iters=n_iters)
print("train duration:", datetime.now() - t0)
t0 = datetime.now()
print("train score:", model.score(Xtrain, Ytrain), "duration:", datetime.now() - t0)
t0 = datetime.now()
print("test score:", model.score(Xtest, Ytest), "duration:", datetime.now() - t0)
if Xtrain.shape[1] == 2:
plot_decision_boundary(model, Xtrain, Ytrain)
| KernelSVM |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 1403,
"end": 2122
} | class ____(RuleBasedStateMachine):
trees = Bundle("BinaryTree")
@rule(target=trees, x=st.booleans())
def leaf(self, x):
return Leaf(x)
@rule(target=trees, left=trees, right=trees)
def split(self, left, right):
return Split(left, right)
@rule(tree=trees)
def test_is_balanced(self, tree):
if isinstance(tree, Leaf):
return
assert abs(self.size(tree.left) - self.size(tree.right)) <= 1
self.test_is_balanced(tree.left)
self.test_is_balanced(tree.right)
def size(self, tree):
if isinstance(tree, Leaf):
return 1
else:
return 1 + self.size(tree.left) + self.size(tree.right)
| BalancedTrees |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 172676,
"end": 174835
} | class ____:
def test_pbdn_seq(self):
pb = special.pbdn_seq(1, .1)
assert_allclose(pb, (array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),
atol=1.5e-4, rtol=0)
def test_pbdv(self):
special.pbdv(1,.2)
1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_allclose(pbv, (real(pbn[0]), real(pbn[1])), atol=1.5e-4, rtol=0)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)*special.rgamma(.5-.5*eta)
assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_seq(self):
res1, res2 = special.pbvv_seq(2, 3)
assert_allclose(res1, np.array([2.976319645712036,
1.358840996329579,
0.5501016716383508]))
assert_allclose(res2, np.array([3.105638472238475,
0.9380581512176672,
0.533688488872053]))
| TestParabolicCylinder |
python | django__django | django/contrib/admin/checks.py | {
"start": 30992,
"end": 47579
} | class ____(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
return [
*super().check(admin_obj),
*self._check_save_as(admin_obj),
*self._check_save_on_top(admin_obj),
*self._check_inlines(admin_obj),
*self._check_list_display(admin_obj),
*self._check_list_display_links(admin_obj),
*self._check_list_filter(admin_obj),
*self._check_list_select_related(admin_obj),
*self._check_list_per_page(admin_obj),
*self._check_list_max_show_all(admin_obj),
*self._check_list_editable(admin_obj),
*self._check_search_fields(admin_obj),
*self._check_date_hierarchy(admin_obj),
*self._check_actions(admin_obj),
]
def _check_save_as(self, obj):
"""Check save_as is a boolean."""
if not isinstance(obj.save_as, bool):
return must_be("a boolean", option="save_as", obj=obj, id="admin.E101")
else:
return []
def _check_save_on_top(self, obj):
"""Check save_on_top is a boolean."""
if not isinstance(obj.save_on_top, bool):
return must_be("a boolean", option="save_on_top", obj=obj, id="admin.E102")
else:
return []
def _check_inlines(self, obj):
"""Check all inline model admin classes."""
if not isinstance(obj.inlines, (list, tuple)):
return must_be(
"a list or tuple", option="inlines", obj=obj, id="admin.E103"
)
else:
return list(
chain.from_iterable(
self._check_inlines_item(obj, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
)
)
def _check_inlines_item(self, obj, inline, label):
"""Check one inline model admin."""
try:
inline_label = inline.__module__ + "." + inline.__name__
except AttributeError:
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % obj,
obj=obj.__class__,
id="admin.E104",
)
]
from django.contrib.admin.options import InlineModelAdmin
if not _issubclass(inline, InlineModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % inline_label,
obj=obj.__class__,
id="admin.E104",
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
obj=obj.__class__,
id="admin.E105",
)
]
elif not _issubclass(inline.model, models.Model):
return must_be(
"a Model", option="%s.model" % inline_label, obj=obj, id="admin.E106"
)
else:
return inline(obj.model, obj.admin_site).check()
def _check_list_display(self, obj):
"""Check list_display only contains fields or usable attributes."""
if not isinstance(obj.list_display, (list, tuple)):
return must_be(
"a list or tuple", option="list_display", obj=obj, id="admin.E107"
)
else:
return list(
chain.from_iterable(
self._check_list_display_item(obj, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
)
)
def _check_list_display_item(self, obj, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
try:
field = obj.model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(obj.model, item)
except AttributeError:
try:
field = get_fields_from_path(obj.model, item)[-1]
except (FieldDoesNotExist, NotRelationField):
return [
checks.Error(
f"The value of '{label}' refers to '{item}', which is not "
f"a callable or attribute of '{obj.__class__.__name__}', "
"or an attribute, method, or field on "
f"'{obj.model._meta.label}'.",
obj=obj.__class__,
id="admin.E108",
)
]
if (
getattr(field, "is_relation", False)
and (field.many_to_many or field.one_to_many)
) or (getattr(field, "rel", None) and field.rel.field.many_to_one):
return [
checks.Error(
f"The value of '{label}' must not be a many-to-many field or a "
f"reverse foreign key.",
obj=obj.__class__,
id="admin.E109",
)
]
return []
def _check_list_display_links(self, obj):
"""Check that list_display_links is a unique subset of list_display."""
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be(
"a list, a tuple, or None",
option="list_display_links",
obj=obj,
id="admin.E110",
)
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(
chain.from_iterable(
self._check_list_display_links_item(
obj, field_name, "list_display_links[%d]" % index
)
for index, field_name in enumerate(obj.list_display_links)
)
)
return []
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in "
"'list_display'." % (label, field_name),
obj=obj.__class__,
id="admin.E111",
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be(
"a list or tuple", option="list_filter", obj=obj, id="admin.E112"
)
else:
return list(
chain.from_iterable(
self._check_list_filter_item(obj, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
)
)
def _check_list_filter_item(self, obj, item, label):
"""
Check one item of `list_filter`, the three valid options are:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import FieldListFilter, ListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not _issubclass(item, ListFilter):
return must_inherit_from(
parent="ListFilter", option=label, obj=obj, id="admin.E113"
)
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'."
% label,
obj=obj.__class__,
id="admin.E114",
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not _issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(
parent="FieldListFilter",
option="%s[1]" % label,
obj=obj,
id="admin.E115",
)
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(obj.model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a "
"Field." % (label, field),
obj=obj.__class__,
id="admin.E116",
)
]
else:
return []
def _check_list_select_related(self, obj):
"""Check that list_select_related is a boolean, a list or a tuple."""
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be(
"a boolean, tuple or list",
option="list_select_related",
obj=obj,
id="admin.E117",
)
else:
return []
def _check_list_per_page(self, obj):
"""Check that list_per_page is an integer."""
if not isinstance(obj.list_per_page, int):
return must_be(
"an integer", option="list_per_page", obj=obj, id="admin.E118"
)
else:
return []
def _check_list_max_show_all(self, obj):
"""Check that list_max_show_all is an integer."""
if not isinstance(obj.list_max_show_all, int):
return must_be(
"an integer", option="list_max_show_all", obj=obj, id="admin.E119"
)
else:
return []
def _check_list_editable(self, obj):
"""Check that list_editable is a sequence of editable fields from
list_display without first element."""
if not isinstance(obj.list_editable, (list, tuple)):
return must_be(
"a list or tuple", option="list_editable", obj=obj, id="admin.E120"
)
else:
return list(
chain.from_iterable(
self._check_list_editable_item(
obj, item, "list_editable[%d]" % index
)
for index, item in enumerate(obj.list_editable)
)
)
def _check_list_editable_item(self, obj, field_name, label):
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(
field=field_name, option=label, obj=obj, id="admin.E121"
)
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
obj=obj.__class__,
id="admin.E122",
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and "
"'list_display_links'." % field_name,
obj=obj.__class__,
id="admin.E123",
)
]
# If list_display[0] is in list_editable, check that
# list_display_links is set. See #22792 and #26229 for use cases.
elif (
obj.list_display[0] == field_name
and not obj.list_display_links
and obj.list_display_links is not None
):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' "
"('%s'), which cannot be used unless 'list_display_links' is "
"set." % (label, obj.list_display[0]),
obj=obj.__class__,
id="admin.E124",
)
]
elif not field.editable or field.primary_key:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable "
"through the admin." % (label, field_name),
obj=obj.__class__,
id="admin.E125",
)
]
else:
return []
def _check_search_fields(self, obj):
"""Check search_fields is a sequence."""
if not isinstance(obj.search_fields, (list, tuple)):
return must_be(
"a list or tuple", option="search_fields", obj=obj, id="admin.E126"
)
else:
return []
def _check_date_hierarchy(self, obj):
"""Check that date_hierarchy refers to DateField or DateTimeField."""
if obj.date_hierarchy is None:
return []
else:
try:
field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1]
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of 'date_hierarchy' refers to '%s', which "
"does not refer to a Field." % obj.date_hierarchy,
obj=obj.__class__,
id="admin.E127",
)
]
else:
if field.get_internal_type() not in {"DateField", "DateTimeField"}:
return must_be(
"a DateField or DateTimeField",
option="date_hierarchy",
obj=obj,
id="admin.E128",
)
else:
return []
def _check_actions(self, obj):
errors = []
actions = obj._get_base_actions()
# Actions with an allowed_permission attribute require the ModelAdmin
# to implement a has_<perm>_permission() method for each permission.
for func, name, _ in actions:
if not hasattr(func, "allowed_permissions"):
continue
for permission in func.allowed_permissions:
method_name = "has_%s_permission" % permission
if not hasattr(obj, method_name):
errors.append(
checks.Error(
"%s must define a %s() method for the %s action."
% (
obj.__class__.__name__,
method_name,
func.__name__,
),
obj=obj.__class__,
id="admin.E129",
)
)
# Names need to be unique.
names = collections.Counter(name for _, name, _ in actions)
for name, count in names.items():
if count > 1:
errors.append(
checks.Error(
"__name__ attributes of actions defined in %s must be "
"unique. Name %r is not unique."
% (
obj.__class__.__name__,
name,
),
obj=obj.__class__,
id="admin.E130",
)
)
return errors
| ModelAdminChecks |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 21031,
"end": 28885
} | class ____(BaseQueryBuilder):
config_class = DiscoverDatasetConfig
def resolve_span_function(
self,
function: str,
span: Span,
alias: str,
min_exclusive_time: float | None = None,
max_exclusive_time: float | None = None,
) -> Function:
op = span.op
group = span.group
condition = Function(
"and",
[
Function("equals", [Identifier("x"), op]),
Function("equals", [Identifier("y"), group]),
],
)
if min_exclusive_time is not None:
condition = Function(
"and", [Function("greater", [Identifier("z"), min_exclusive_time]), condition]
)
if max_exclusive_time is not None:
condition = Function(
"and", [Function("less", [Identifier("z"), max_exclusive_time]), condition]
)
return Function(
"arrayReduce",
[
f"{function}If",
self.column("spans_exclusive_time"),
Function(
"arrayMap",
[
Lambda(
["x", "y", "z"],
condition,
),
self.column("spans_op"),
self.column("spans_group"),
self.column("spans_exclusive_time"),
],
),
],
alias,
)
def query_example_transactions(
snuba_params: SnubaParams,
query: str | None,
direction: str,
orderby: str,
span: Span,
per_suspect: int = 5,
offset: int | None = None,
min_exclusive_time: float | None = None,
max_exclusive_time: float | None = None,
) -> dict[Span, list[EventID]]:
# there aren't any suspects, early return to save an empty query
if per_suspect == 0:
return {}
selected_columns: list[str] = [
"id",
"project.id",
]
builder = SpanQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=snuba_params,
selected_columns=selected_columns,
query=query,
orderby=[],
limit=per_suspect,
offset=offset,
)
# Make sure to resolve the custom span functions and add it to the columns and order bys
orderby_columns = [
builder.resolve_span_function(
function, span, f"{function}_span_time", min_exclusive_time, max_exclusive_time
)
for function in SPAN_PERFORMANCE_COLUMNS[orderby].suspect_example_functions
]
builder.columns += orderby_columns
builder.orderby += [
OrderBy(column, Direction.DESC if direction == "-" else Direction.ASC)
for column in orderby_columns
]
# we are only interested in the specific op, group pairs from the suspects
builder.add_conditions(
[
Condition(Function("has", [builder.column("spans_op"), span.op]), Op.EQ, 1),
Condition(Function("has", [builder.column("spans_group"), span.group]), Op.EQ, 1),
Condition(
builder.resolve_span_function(
"count", span, "count_span_time", min_exclusive_time, max_exclusive_time
),
Op.GT,
0,
),
]
)
snql_query = builder.get_snql_query()
results = raw_snql_query(snql_query, "api.organization-events-spans-performance-examples")
examples: dict[Span, list[EventID]] = {Span(span.op, span.group): []}
for example in results["data"]:
value = EventID(snuba_params.project_ids[0], example["id"])
examples[span].append(value)
return examples
def get_span_description(
event: EventID,
span_op: str,
span_group: str,
) -> str | None:
nodestore_event = eventstore.backend.get_event_by_id(event.project_id, event.event_id)
if nodestore_event is None:
return None
data = nodestore_event.data
# the transaction itself is a span as well, so make sure to check it
trace_context = data.get("contexts", {}).get("trace", {})
if trace_context["op"] == span_op and int(trace_context["hash"], 16) == int(span_group, 16):
return data["transaction"]
for span in data.get("spans", []):
if span["op"] == span_op and int(span["hash"], 16) == int(span_group, 16):
return span.get("description")
return None
def get_example_transaction(
event: EventID,
span_op: str,
span_group: str,
min_exclusive_time: float | None = None,
max_exclusive_time: float | None = None,
) -> ExampleTransaction:
span_group_id = int(span_group, 16)
nodestore_event = eventstore.backend.get_event_by_id(event.project_id, event.event_id)
assert nodestore_event is not None
data = nodestore_event.data
# the transaction itself is a span as well but we need to reconstruct
# it from the event as it's not present in the spans array
trace_context = data.get("contexts", {}).get("trace", {})
root_span = {
"span_id": trace_context["span_id"],
"op": trace_context["op"],
"hash": trace_context["hash"],
"exclusive_time": trace_context["exclusive_time"],
"description": data["transaction"],
"start_timestamp": data["start_timestamp"],
"timestamp": data["timestamp"],
}
matching_spans = [
span
for span in chain([root_span], data.get("spans", []))
if span["op"] == span_op and int(span["hash"], 16) == span_group_id
]
if min_exclusive_time is not None:
matching_spans = [
span for span in matching_spans if span["exclusive_time"] > min_exclusive_time
]
if max_exclusive_time is not None:
matching_spans = [
span for span in matching_spans if span["exclusive_time"] < max_exclusive_time
]
# get the first non-None description
# use None if all descriptions are None
description = None
for span in matching_spans:
if span.get("description") is None:
continue
description = span["description"]
spans: list[ExampleSpan] = [
ExampleSpan(
id=span["span_id"],
start_timestamp=span["start_timestamp"],
finish_timestamp=span["timestamp"],
exclusive_time=span["exclusive_time"],
trace_id=trace_context["trace_id"],
)
for span in matching_spans
]
non_overlapping_exclusive_time_windows = union_time_windows(
[
window
for span in spans
for window in get_exclusive_time_windows(
span,
# don't need to check the root span here because its parent
# will never be one of the spans in this transaction
data.get("spans", []),
)
]
)
return ExampleTransaction(
id=event.event_id,
description=description,
start_timestamp=data["start_timestamp"],
finish_timestamp=data["timestamp"],
non_overlapping_exclusive_time=sum(
window.duration_ms for window in non_overlapping_exclusive_time_windows
),
spans=spans,
)
def get_exclusive_time_windows(span: ExampleSpan, spans: list[Any]) -> list[TimeWindow]:
non_overlapping_children_time_windows = union_time_windows(
[
TimeWindow(start=child["start_timestamp"], end=child["timestamp"])
for child in spans
if child.get("parent_span_id") == span.id
]
)
return remove_time_windows(
TimeWindow(start=span.start_timestamp, end=span.finish_timestamp),
non_overlapping_children_time_windows,
)
| SpanQueryBuilder |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 60677,
"end": 60800
} | class ____(AnsiFunction[str]):
"""The USER() SQL function."""
type = sqltypes.String()
inherit_cache = True
| user |
python | doocs__leetcode | solution/2500-2599/2500.Delete Greatest Value in Each Row/Solution.py | {
"start": 0,
"end": 180
} | class ____:
def deleteGreatestValue(self, grid: List[List[int]]) -> int:
for row in grid:
row.sort()
return sum(max(col) for col in zip(*grid))
| Solution |
python | plotly__plotly.py | plotly/graph_objs/layout/mapbox/_layer.py | {
"start": 235,
"end": 24492
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.mapbox"
_path_str = "layout.mapbox.layer"
_valid_props = {
"below",
"circle",
"color",
"coordinates",
"fill",
"line",
"maxzoom",
"minzoom",
"name",
"opacity",
"source",
"sourceattribution",
"sourcelayer",
"sourcetype",
"symbol",
"templateitemname",
"type",
"visible",
}
@property
def below(self):
"""
Determines if the layer will be inserted before the layer with
the specified ID. If omitted or set to '', the layer will be
inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
@property
def circle(self):
"""
The 'circle' property is an instance of Circle
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Circle`
- A dict of string/value properties that will be passed
to the Circle constructor
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Circle
"""
return self["circle"]
@circle.setter
def circle(self, val):
self["circle"] = val
@property
def color(self):
"""
Sets the primary layer color. If `type` is "circle", color
corresponds to the circle color (mapbox.layer.paint.circle-
color) If `type` is "line", color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill", color
corresponds to the fill color (mapbox.layer.paint.fill-color)
If `type` is "symbol", color corresponds to the icon color
(mapbox.layer.paint.icon-color)
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coordinates(self):
"""
Sets the coordinates array contains [longitude, latitude] pairs
for the image corners listed in clockwise order: top left, top
right, bottom right, bottom left. Only has an effect for
"image" `sourcetype`.
The 'coordinates' property accepts values of any type
Returns
-------
Any
"""
return self["coordinates"]
@coordinates.setter
def coordinates(self, val):
self["coordinates"] = val
@property
def fill(self):
"""
The 'fill' property is an instance of Fill
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Fill`
- A dict of string/value properties that will be passed
to the Fill constructor
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Fill
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def maxzoom(self):
"""
Sets the maximum zoom level (mapbox.layer.maxzoom). At zoom
levels equal to or greater than the maxzoom, the layer will be
hidden.
The 'maxzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["maxzoom"]
@maxzoom.setter
def maxzoom(self, val):
self["maxzoom"] = val
@property
def minzoom(self):
"""
Sets the minimum zoom level (mapbox.layer.minzoom). At zoom
levels less than the minzoom, the layer will be hidden.
The 'minzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["minzoom"]
@minzoom.setter
def minzoom(self, val):
self["minzoom"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the layer. If `type` is "circle", opacity
corresponds to the circle opacity (mapbox.layer.paint.circle-
opacity) If `type` is "line", opacity corresponds to the line
opacity (mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is "symbol",
opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def source(self):
"""
Sets the source data for this layer (mapbox.layer.source). When
`sourcetype` is set to "geojson", `source` can be a URL to a
GeoJSON or a GeoJSON object. When `sourcetype` is set to
"vector" or "raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can be a
URL to an image.
The 'source' property accepts values of any type
Returns
-------
Any
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
@property
def sourceattribution(self):
"""
Sets the attribution for this source.
The 'sourceattribution' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourceattribution"]
@sourceattribution.setter
def sourceattribution(self, val):
self["sourceattribution"] = val
@property
def sourcelayer(self):
"""
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector" source type
that supports multiple layers.
The 'sourcelayer' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourcelayer"]
@sourcelayer.setter
def sourcelayer(self, val):
self["sourcelayer"] = val
@property
def sourcetype(self):
"""
Sets the source type for this layer, that is the type of the
layer data.
The 'sourcetype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['geojson', 'vector', 'raster', 'image']
Returns
-------
Any
"""
return self["sourcetype"]
@sourcetype.setter
def sourcetype(self, val):
self["sourcetype"] = val
@property
def symbol(self):
"""
The 'symbol' property is an instance of Symbol
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Symbol`
- A dict of string/value properties that will be passed
to the Symbol constructor
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Symbol
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def type(self):
"""
Sets the layer type, that is the how the layer data set in
`source` will be rendered With `sourcetype` set to "geojson",
the following values are allowed: "circle", "line", "fill" and
"symbol". but note that "line" and "fill" are not compatible
with Point GeoJSON geometries. With `sourcetype` set to
"vector", the following values are allowed: "circle", "line",
"fill" and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'line', 'fill', 'symbol', 'raster']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def visible(self):
"""
Determines whether this layer is displayed
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.mapbox.layer.Circle
` instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(mapbox.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill",
color corresponds to the fill color
(mapbox.layer.paint.fill-color) If `type` is "symbol",
color corresponds to the icon color
(mapbox.layer.paint.icon-color)
coordinates
Sets the coordinates array contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.mapbox.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.mapbox.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (mapbox.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (mapbox.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(mapbox.layer.paint.circle-opacity) If `type` is
"line", opacity corresponds to the line opacity
(mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is
"symbol", opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
source
Sets the source data for this layer
(mapbox.layer.source). When `sourcetype` is set to
"geojson", `source` can be a URL to a GeoJSON or a
GeoJSON object. When `sourcetype` is set to "vector" or
"raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can
be a URL to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector"
source type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.mapbox.layer.Symbol
` instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is the how the layer data set
in `source` will be rendered With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol". but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
"""
def __init__(
self,
arg=None,
below=None,
circle=None,
color=None,
coordinates=None,
fill=None,
line=None,
maxzoom=None,
minzoom=None,
name=None,
opacity=None,
source=None,
sourceattribution=None,
sourcelayer=None,
sourcetype=None,
symbol=None,
templateitemname=None,
type=None,
visible=None,
**kwargs,
):
"""
Construct a new Layer object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.mapbox.Layer`
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.mapbox.layer.Circle
` instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(mapbox.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill",
color corresponds to the fill color
(mapbox.layer.paint.fill-color) If `type` is "symbol",
color corresponds to the icon color
(mapbox.layer.paint.icon-color)
coordinates
Sets the coordinates array contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.mapbox.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.mapbox.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (mapbox.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (mapbox.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(mapbox.layer.paint.circle-opacity) If `type` is
"line", opacity corresponds to the line opacity
(mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is
"symbol", opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
source
Sets the source data for this layer
(mapbox.layer.source). When `sourcetype` is set to
"geojson", `source` can be a URL to a GeoJSON or a
GeoJSON object. When `sourcetype` is set to "vector" or
"raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can
be a URL to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector"
source type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.mapbox.layer.Symbol
` instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is the how the layer data set
in `source` will be rendered With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol". but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
Returns
-------
Layer
"""
super().__init__("layers")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.mapbox.Layer
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.Layer`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("below", arg, below)
self._set_property("circle", arg, circle)
self._set_property("color", arg, color)
self._set_property("coordinates", arg, coordinates)
self._set_property("fill", arg, fill)
self._set_property("line", arg, line)
self._set_property("maxzoom", arg, maxzoom)
self._set_property("minzoom", arg, minzoom)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("source", arg, source)
self._set_property("sourceattribution", arg, sourceattribution)
self._set_property("sourcelayer", arg, sourcelayer)
self._set_property("sourcetype", arg, sourcetype)
self._set_property("symbol", arg, symbol)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("type", arg, type)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Layer |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 5931,
"end": 6154
} | class ____(_RequestFrame, frozen=True):
id: int
"""
The id of the request this is a response to
"""
body: dict[str, Any] | None = None
error: dict[str, Any] | None = None
@attrs.define()
| _ResponseFrame |
python | RaRe-Technologies__gensim | gensim/models/bm25model.py | {
"start": 5062,
"end": 9637
} | class ____(BM25ABC):
"""The original Okapi BM25 scoring function of Robertson et al. [2]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import OkapiBM25Model
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> model = OkapiBM25Model(dictionary=dictionary) # fit model
>>>
>>> corpus = [dictionary.doc2bow(line) for line in common_texts] # convert corpus to BoW format
>>> vector = model[corpus[0]] # apply model to the first corpus document
References
----------
.. [2] Robertson S. E., Walker S., Jones S., Hancock-Beaulieu M. M., Gatford M. (1995).
`Okapi at TREC-3 <http://research.microsoft.com/pubs/67649/okapi_trec3.pdf>`_.
*NIST Special Publication 500-226*.
"""
def __init__(self, corpus=None, dictionary=None, k1=1.5, b=0.75, epsilon=0.25):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
unintialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
unintialized. Default is None.
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [5]_ suggests to set `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [5]_ suggests to set `b` to 0.75, which is the default.
epsilon : float
A positive tuning parameter that lower-bounds an inverse document frequency.
Defaults to 0.25.
Attributes
----------
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests to set `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests to set `b` to 0.75, which is the default.
epsilon : float
A positive tuning parameter that lower-bounds an inverse document frequency.
Defaults to 0.25.
References
----------
.. [3] Singhal, A. (2001). `Modern information retrieval: A brief overview
<http://singhal.info/ieee2001.pdf>`_. *IEEE Data Eng. Bull.*, 24(4), 35–43.
"""
self.k1, self.b, self.epsilon = k1, b, epsilon
super().__init__(corpus, dictionary)
def precompute_idfs(self, dfs, num_docs):
idf_sum = 0
idfs = dict()
negative_idfs = []
for term_id, freq in dfs.items():
idf = math.log(num_docs - freq + 0.5) - math.log(freq + 0.5)
idfs[term_id] = idf
idf_sum += idf
if idf < 0:
negative_idfs.append(term_id)
average_idf = idf_sum / len(idfs)
eps = self.epsilon * average_idf
for term_id in negative_idfs:
idfs[term_id] = eps
return idfs
def get_term_weights(self, num_tokens, term_frequencies, idfs):
term_weights = idfs * (term_frequencies * (self.k1 + 1)
/ (term_frequencies + self.k1 * (1 - self.b + self.b
* num_tokens / self.avgdl)))
return term_weights
| OkapiBM25Model |
python | huggingface__transformers | tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py | {
"start": 6563,
"end": 9398
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `Phi4Multimodal`.
"""
all_model_classes = (Phi4MultimodalForCausalLM, Phi4MultimodalModel) if is_torch_available() else ()
_is_composite = True
def setUp(self):
self.model_tester = Phi4MultimodalModelTester(self)
self.config_tester = ConfigTester(self, config_class=Phi4MultimodalConfig)
@unittest.skip(reason="Depending on input modalities, some params may not have gradients")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="Depending on input modalities, some params may not have gradients")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="Depending on input modalities, some params may not have gradients")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="Test tries to instantiate dynamic cache with an arg")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Test is only for old attention format")
def test_sdpa_can_dispatch_composite_models(self):
pass
@unittest.skip(reason="Static cache supported only for text-only inputs (not images or audios)")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
@unittest.skip(reason="Static cache supported only for text-only inputs (not images or audios)")
def test_generate_with_static_cache(self):
pass
@unittest.skip(
reason="Supported only for text-only inputs (otherwise dynamic control flows for multimodal inputs)"
)
def test_generate_compilation_all_outputs(self):
pass
@unittest.skip(
reason="Supported only for text-only inputs (otherwise dynamic control flows for multimodal inputs)"
)
@pytest.mark.torch_compile_test
def test_generate_compile_model_forward_fullgraph(self):
pass
@parameterized.expand([("random",), ("same",)])
@unittest.skip(reason="`image_attention_mask` has a specific shape")
def test_assisted_decoding_matches_greedy_search(self, assistant_type):
pass
@unittest.skip(reason="`image_attention_mask` has a specific shape")
def test_assisted_decoding_sample(self):
pass
@unittest.skip(reason="`image_attention_mask` has a specific shape")
def test_prompt_lookup_decoding_matches_greedy_search(self):
pass
@unittest.skip(reason="Cannot unpad inputs for all modalities so easily")
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(reason="Dynamo error")
def test_flex_attention_with_grads(self):
pass
@require_torch
@slow
| Phi4MultimodalModelTest |
python | doocs__leetcode | solution/1800-1899/1807.Evaluate the Bracket Pairs of a String/Solution.py | {
"start": 0,
"end": 427
} | class ____:
def evaluate(self, s: str, knowledge: List[List[str]]) -> str:
d = {a: b for a, b in knowledge}
i, n = 0, len(s)
ans = []
while i < n:
if s[i] == '(':
j = s.find(')', i + 1)
ans.append(d.get(s[i + 1 : j], '?'))
i = j
else:
ans.append(s[i])
i += 1
return ''.join(ans)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes1.py | {
"start": 644,
"end": 699
} | class ____(E, metaclass=type, metaclass=type):
pass
| H |
python | PrefectHQ__prefect | tests/server/models/test_flows.py | {
"start": 10151,
"end": 10825
} | class ____:
async def test_delete_flow(self, session):
# create a flow to delete
flow = await models.flows.create_flow(
session=session, flow=schemas.core.Flow(name="my-flow")
)
assert flow.name == "my-flow"
assert await models.flows.delete_flow(session=session, flow_id=flow.id)
# make sure the flow is deleted
assert (await models.flows.read_flow(session=session, flow_id=flow.id)) is None
async def test_delete_flow_returns_false_if_does_not_exist(self, session):
result = await models.flows.delete_flow(session=session, flow_id=str(uuid4()))
assert result is False
| TestDeleteFlow |
python | milvus-io__pymilvus | tests/test_search_iterator.py | {
"start": 310,
"end": 6563
} | class ____:
@pytest.fixture
def mock_connection(self):
connection = Mock()
connection.describe_collection.return_value = {"collection_id": "test_id"}
return connection
@pytest.fixture
def search_data(self):
rng = np.random.default_rng(seed=19530)
return rng.random((1, 8)).tolist()
def create_mock_search_result(self, num_results=10):
# Create mock search results
mock_ids = schema_pb2.IDs(
int_id=schema_pb2.LongArray(data=list(range(num_results)))
)
result = schema_pb2.SearchResultData(
num_queries=1,
top_k=num_results,
scores=[1.0 * i for i in range(num_results)],
ids=mock_ids,
topks=[num_results],
)
# Create mock iterator info
result.search_iterator_v2_results.token = "test_token"
result.search_iterator_v2_results.last_bound = 0.5
return SearchResult(result)
def test_init_basic(self, mock_connection, search_data):
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100
)
assert iterator._batch_size == 100
assert iterator._left_res_cnt is None
assert iterator._collection_id == "test_id"
def test_init_with_limit(self, mock_connection, search_data):
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100,
limit=50
)
assert iterator._left_res_cnt == 50
def test_invalid_batch_size(self, mock_connection, search_data):
with pytest.raises(ParamError):
SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=-1
)
def test_invalid_offset(self, mock_connection, search_data):
with pytest.raises(ParamError):
SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100,
offset=10
)
def test_multiple_vectors_error(self, mock_connection):
with pytest.raises(ParamError):
SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=[[1, 2], [3, 4]], # Multiple vectors
batch_size=100
)
@patch('pymilvus.client.search_iterator.SearchIteratorV2._probe_for_compability')
def test_next_without_external_filter(self, mock_probe, mock_connection, search_data):
mock_connection.search.return_value = self.create_mock_search_result()
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100
)
result = iterator.next()
assert result is not None
assert len(result) == 10 # Number of results from mock
@patch('pymilvus.client.search_iterator.SearchIteratorV2._probe_for_compability')
def test_next_with_limit(self, mock_probe, mock_connection, search_data):
mock_connection.search.return_value = self.create_mock_search_result()
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100,
limit=5
)
result = iterator.next()
assert result is not None
assert len(result) == 5 # Limited to 5 results
def test_server_incompatible(self, mock_connection, search_data):
# Mock search result with empty token
mock_result = self.create_mock_search_result()
mock_result._search_iterator_v2_results.token = ""
mock_connection.search.return_value = mock_result
with pytest.raises(ServerVersionIncompatibleException):
SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100
)
@patch('pymilvus.client.search_iterator.SearchIteratorV2._probe_for_compability')
def test_external_filter(self, mock_probe, mock_connection, search_data):
mock_connection.search.return_value = self.create_mock_search_result()
def filter_func(hits):
return [hit for hit in hits if hit["distance"] < 5.0]
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100,
external_filter_func=filter_func
)
result = iterator.next()
assert result is not None
assert all(hit["distance"] < 5.0 for hit in result)
@patch('pymilvus.client.search_iterator.SearchIteratorV2._probe_for_compability')
def test_filter_and_external_filter(self, mock_probe, mock_connection, search_data):
# Create mock search result with field values
mock_result = self.create_mock_search_result()
for hit in mock_result[0]:
hit["entity"]["field_1"] = hit["id"] % 2
mock_result[0] = list(filter(lambda x: x["entity"]["field_1"] < 5, mock_result[0]))
mock_connection.search.return_value = mock_result
expr_filter = "field_1 < 5"
def filter_func(hits):
return [hit for hit in hits if hit["distance"] < 5.0] # Only hits with distance < 5.0 should pass
iterator = SearchIteratorV2(
connection=mock_connection,
collection_name="test_collection",
data=search_data,
batch_size=100,
filter=expr_filter,
external_filter_func=filter_func
)
result = iterator.next()
assert result is not None
assert all(hit["distance"] < 5.0 and hit["entity"]["field_1"] < 5 for hit in result)
| TestSearchIteratorV2 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_direct_strategies.py | {
"start": 22399,
"end": 26038
} | class ____(enum.Enum):
a = 1
def requires_arg(value):
"""Similar to the enum.Enum.__call__ method."""
@given(st.data())
def test_builds_error_messages(data):
# If we call them directly, we get a simple TypeError in both cases
with pytest.raises(TypeError):
requires_arg()
with pytest.raises(TypeError):
AnEnum()
# But we have an improved error message if you try to build an Enum
assert issubclass(InvalidArgument, TypeError) # it's a valid substitution
with pytest.raises(TypeError): # which only applies to enums
data.draw(st.builds(requires_arg))
with pytest.raises(
InvalidArgument,
match=r".* try using sampled_from\(.+\) instead of builds\(.+\)",
):
data.draw(st.builds(AnEnum))
# and sampled_from() does in fact work
data.draw(st.sampled_from(AnEnum))
@pytest.mark.parametrize(
"strat_a,strat_b",
[
pytest.param(
st.integers(),
st.integers(0),
marks=pytest.mark.xfail(
# this is the exception raised by failed pytest.warns(),
# ref https://github.com/pytest-dev/pytest/issues/8928
raises=pytest.fail.Exception,
strict=True,
reason="constraints not checked",
),
),
(st.builds(int), st.builds(float)),
(st.none(), st.integers()),
(
st.composite(lambda draw: draw(st.none()))(),
st.composite(lambda draw: draw(st.integers()))(),
),
],
)
def test_incompatible_shared_strategies_warns(strat_a, strat_b):
shared_a = st.shared(strat_a, key="share")
shared_b = st.shared(strat_b, key="share")
@given(shared_a, shared_b)
@settings(max_examples=10, phases=[Phase.generate])
def test_it(a, b):
assert a == b
with pytest.warns(HypothesisWarning, match="Different strategies"):
test_it()
@st.composite
def _composite1(draw):
return draw(st.integers())
@st.composite
def _composite2(draw):
return draw(st.integers())
@pytest.mark.parametrize(
"strat_a,strat_b",
[
(st.floats(allow_nan=False), st.floats(allow_nan=False)),
(st.builds(float), st.builds(float)),
(_composite1(), _composite1()),
(
st.floats(allow_nan=False, allow_infinity=False),
st.floats(allow_nan=False, allow_infinity=0),
),
(_composite1(), _composite2()),
pytest.param(
st.integers().flatmap(st.just),
st.integers(),
marks=pytest.mark.xfail(
raises=HypothesisWarning,
strict=True,
reason="really different (but compatible)",
),
),
],
)
def test_compatible_shared_strategies_do_not_warn(strat_a, strat_b):
shared_a = st.shared(strat_a, key="share")
shared_b = st.shared(strat_b, key="share")
@given(shared_a, shared_b)
@settings(max_examples=10, phases=[Phase.generate])
def test_it(a, b):
assert a == b
with warnings.catch_warnings():
warnings.simplefilter("error", HypothesisWarning)
test_it()
def test_compatible_nested_shared_strategies_do_not_warn():
shared_a = st.shared(st.integers(), key="share")
shared_b = st.shared(st.integers(), key="share")
shared_c = st.shared(shared_a, key="nested_share")
shared_d = st.shared(shared_b, key="nested_share")
@given(shared_a, shared_b, shared_c, shared_d)
@settings(max_examples=10, phases=[Phase.generate])
def test_it(a, b, c, d):
assert a == b == c == d
test_it()
| AnEnum |
python | apache__airflow | airflow-core/src/airflow/executors/workloads.py | {
"start": 1726,
"end": 1863
} | class ____(BaseModel):
"""Schema for telling task which bundle to run with."""
name: str
version: str | None = None
| BundleInfo |
python | scipy__scipy | scipy/integrate/tests/test_integrate.py | {
"start": 652,
"end": 1834
} | class ____:
# Check integrate.odeint
def _do_problem(self, problem):
t = arange(0.0, problem.stop_t, 0.05)
# Basic case
z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
assert_(problem.verify(z, t))
# Use tfirst=True
z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
full_output=True, tfirst=True)
assert_(problem.verify(z, t))
if hasattr(problem, 'jac'):
# Use Dfun
z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac,
full_output=True)
assert_(problem.verify(z, t))
# Use Dfun and tfirst=True
z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t,
Dfun=lambda t, y: problem.jac(y, t),
full_output=True, tfirst=True)
assert_(problem.verify(z, t))
def test_odeint(self):
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
self._do_problem(problem)
| TestOdeint |
python | readthedocs__readthedocs.org | readthedocs/api/v3/filters.py | {
"start": 2423,
"end": 2677
} | class ____(filters.FilterSet):
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
class Meta:
model = RemoteOrganization
fields = [
"name",
"vcs_provider",
]
| RemoteOrganizationFilter |
python | getsentry__sentry | tests/sentry/search/events/builder/test_errors.py | {
"start": 532,
"end": 4558
} | class ____(TestCase):
def setUp(self) -> None:
self.projects = [self.project.id]
@pytest.mark.querybuilder
def test_simple_query(self) -> None:
query = ErrorsQueryBuilder(
dataset=Dataset.Events,
query="status:unresolved",
selected_columns=["count_unique(user)"],
params={
"project_id": self.projects,
},
offset=None,
limit=None,
config=QueryBuilderConfig(
skip_time_conditions=True,
),
).get_snql_query()
query.validate()
e_entity = Entity(Dataset.Events.value, alias=Dataset.Events.value)
g_entity = Entity("group_attributes", alias="ga")
assert query.query.match == Join([Relationship(e_entity, "attributes", g_entity)])
assert query.query.select == [
Function(
function="uniq",
parameters=[Column(name="tags[sentry:user]", entity=e_entity)],
alias="count_unique_user",
)
]
assert query.query.where == [
Condition(Column("group_status", entity=g_entity), Op.IN, [0]),
Condition(
Column("project_id", entity=Entity("events", alias="events")),
Op.IN,
self.projects,
),
Condition(
Column("project_id", entity=g_entity),
Op.IN,
self.projects,
),
]
def test_upsampled_count_legacy_discover_function(self) -> None:
"""Test that the legacy DiscoverFunction for upsampled_count() produces the correct aggregate expression"""
from sentry.search.events.fields import resolve_field
# Test the legacy path that goes through DiscoverFunction.aggregate
# This tests the aggregate field we fixed: ["toInt64(sum(ifNull(sample_weight, 1)))", None, None]
resolved = resolve_field("upsampled_count()")
# Should return a ResolvedFunction with the correct aggregate
assert resolved.aggregate is not None
assert len(resolved.aggregate) == 3
# Position 0: The full SNQL function expression matching the helper method
assert resolved.aggregate[0] == "toInt64(sum(ifNull(sample_weight, 1)))"
# Position 1: Column (None for upsampled_count as it uses a fixed column)
assert resolved.aggregate[1] is None
# Position 2: Alias
assert resolved.aggregate[2] == "upsampled_count"
def test_is_status_simple_query(self) -> None:
query = ErrorsQueryBuilder(
dataset=Dataset.Events,
query="is:unresolved",
selected_columns=["count_unique(user)"],
params={
"project_id": self.projects,
},
offset=None,
limit=None,
config=QueryBuilderConfig(
skip_time_conditions=True,
parser_config_overrides=PARSER_CONFIG_OVERRIDES,
),
).get_snql_query()
query.validate()
e_entity = Entity(Dataset.Events.value, alias=Dataset.Events.value)
g_entity = Entity("group_attributes", alias="ga")
assert query.query.match == Join([Relationship(e_entity, "attributes", g_entity)])
assert query.query.select == [
Function(
function="uniq",
parameters=[Column(name="tags[sentry:user]", entity=e_entity)],
alias="count_unique_user",
)
]
assert query.query.where == [
Condition(Column("group_status", entity=g_entity), Op.IN, [0]),
Condition(
Column("project_id", entity=Entity("events", alias="events")),
Op.IN,
self.projects,
),
Condition(
Column("project_id", entity=g_entity),
Op.IN,
self.projects,
),
]
| ErrorsQueryBuilderTest |
python | celery__celery | celery/loaders/base.py | {
"start": 734,
"end": 9147
} | class ____:
"""Base class for loaders.
Loaders handles,
* Reading celery client/worker configurations.
* What happens when a task starts?
See :meth:`on_task_init`.
* What happens when the worker starts?
See :meth:`on_worker_init`.
* What happens when the worker shuts down?
See :meth:`on_worker_shutdown`.
* What modules are imported to find tasks?
"""
builtin_modules = frozenset()
configured = False
override_backends = {}
worker_initialized = False
_conf = unconfigured
def __init__(self, app, **kwargs):
self.app = app
self.task_modules = set()
def now(self, utc=True):
if utc:
return datetime.now(timezone.utc)
return datetime.now()
def on_task_init(self, task_id, task):
"""Called before a task is executed."""
def on_process_cleanup(self):
"""Called after a task is executed."""
def on_worker_init(self):
"""Called when the worker (:program:`celery worker`) starts."""
def on_worker_shutdown(self):
"""Called when the worker (:program:`celery worker`) shuts down."""
def on_worker_process_init(self):
"""Called when a child process starts."""
def import_task_module(self, module):
self.task_modules.add(module)
return self.import_from_cwd(module)
def import_module(self, module, package=None):
return importlib.import_module(module, package=package)
def import_from_cwd(self, module, imp=None, package=None):
return import_from_cwd(
module,
self.import_module if imp is None else imp,
package=package,
)
def import_default_modules(self):
responses = signals.import_modules.send(sender=self.app)
# Prior to this point loggers are not yet set up properly, need to
# check responses manually and reraised exceptions if any, otherwise
# they'll be silenced, making it incredibly difficult to debug.
for _, response in responses:
if isinstance(response, Exception):
raise response
return [self.import_task_module(m) for m in self.default_modules]
def init_worker(self):
if not self.worker_initialized:
self.worker_initialized = True
self.import_default_modules()
self.on_worker_init()
def shutdown_worker(self):
self.on_worker_shutdown()
def init_worker_process(self):
self.on_worker_process_init()
def config_from_object(self, obj, silent=False):
if isinstance(obj, str):
try:
obj = self._smart_import(obj, imp=self.import_from_cwd)
except (ImportError, AttributeError):
if silent:
return False
raise
self._conf = force_mapping(obj)
if self._conf.get('override_backends') is not None:
self.override_backends = self._conf['override_backends']
return True
def _smart_import(self, path, imp=None):
imp = self.import_module if imp is None else imp
if ':' in path:
# Path includes attribute so can just jump
# here (e.g., ``os.path:abspath``).
return symbol_by_name(path, imp=imp)
# Not sure if path is just a module name or if it includes an
# attribute name (e.g., ``os.path``, vs, ``os.path.abspath``).
try:
return imp(path)
except ImportError:
# Not a module name, so try module + attribute.
return symbol_by_name(path, imp=imp)
def _import_config_module(self, name):
try:
self.find_module(name)
except NotAPackage as exc:
if name.endswith('.py'):
reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format(
module=name, suggest=name[:-3])), sys.exc_info()[2])
raise NotAPackage(CONFIG_INVALID_NAME.format(module=name)) from exc
else:
return self.import_from_cwd(name)
def find_module(self, module):
return find_module(module)
def cmdline_config_parser(self, args, namespace='celery',
re_type=re.compile(r'\((\w+)\)'),
extra_types=None,
override_types=None):
extra_types = extra_types if extra_types else {'json': json.loads}
override_types = override_types if override_types else {
'tuple': 'json',
'list': 'json',
'dict': 'json'
}
from celery.app.defaults import NAMESPACES, Option
namespace = namespace and namespace.lower()
typemap = dict(Option.typemap, **extra_types)
def getarg(arg):
"""Parse single configuration from command-line."""
# ## find key/value
# ns.key=value|ns_key=value (case insensitive)
key, value = arg.split('=', 1)
key = key.lower().replace('.', '_')
# ## find name-space.
# .key=value|_key=value expands to default name-space.
if key[0] == '_':
ns, key = namespace, key[1:]
else:
# find name-space part of key
ns, key = key.split('_', 1)
ns_key = (ns and ns + '_' or '') + key
# (type)value makes cast to custom type.
cast = re_type.match(value)
if cast:
type_ = cast.groups()[0]
type_ = override_types.get(type_, type_)
value = value[len(cast.group()):]
value = typemap[type_](value)
else:
try:
value = NAMESPACES[ns.lower()][key].to_python(value)
except ValueError as exc:
# display key name in error message.
raise ValueError(f'{ns_key!r}: {exc}')
return ns_key, value
return dict(getarg(arg) for arg in args)
def read_configuration(self, env='CELERY_CONFIG_MODULE'):
try:
custom_config = os.environ[env]
except KeyError:
pass
else:
if custom_config:
usercfg = self._import_config_module(custom_config)
return DictAttribute(usercfg)
def autodiscover_tasks(self, packages, related_name='tasks'):
self.task_modules.update(
mod.__name__ for mod in autodiscover_tasks(packages or (),
related_name) if mod)
@cached_property
def default_modules(self):
return (
tuple(self.builtin_modules) +
tuple(maybe_list(self.app.conf.imports)) +
tuple(maybe_list(self.app.conf.include))
)
@property
def conf(self):
"""Loader configuration."""
if self._conf is unconfigured:
self._conf = self.read_configuration()
return self._conf
def autodiscover_tasks(packages, related_name='tasks'):
    """Find related modules for *packages*, guarding against re-entrancy.

    Returns an empty tuple when discovery is already in progress
    (``_RACE_PROTECTION`` is set); otherwise a list with one entry per
    package (entries may be ``None`` when no related module exists).
    """
    global _RACE_PROTECTION
    if _RACE_PROTECTION:
        return ()
    _RACE_PROTECTION = True
    try:
        return [find_related_module(pkg, related_name) for pkg in packages]
    finally:
        # always clear the flag, even if discovery raised
        _RACE_PROTECTION = False
def find_related_module(package, related_name):
    """Find module in package."""
    # Django 1.7 allows for specifying a class name in INSTALLED_APPS.
    # (Issue #2248).
    try:
        pkg_module = importlib.import_module(package)
        if not related_name and pkg_module:
            # No related_name requested: the package itself is the answer.
            return pkg_module
    except ModuleNotFoundError:
        # The dotted path may end in a class name; retry one level up.
        package, _, _ = package.rpartition('.')
        if not package:
            raise

    candidate = f'{package}.{related_name}'
    try:
        # Try to find related_name under package.
        return importlib.import_module(candidate)
    except ModuleNotFoundError as exc:
        failed_name = getattr(exc, 'name', None)
        if failed_name and failed_name == candidate:
            # The candidate itself does not exist: not an error.
            return None
        # A nested import inside the candidate failed; propagate it.
        raise
| BaseLoader |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/servicehook.py | {
"start": 143,
"end": 487
class ____(Serializer):
    def serialize(self, obj, attrs, user, **kwargs):
        """Serialize a service hook into its public API representation."""
        return {
            "id": obj.guid,
            "url": obj.url,
            "secret": obj.secret,
            # human-readable status label rather than the raw integer
            "status": obj.get_status_display(),
            # sorted for a stable, deterministic payload
            "events": sorted(obj.events),
            "dateCreated": obj.date_added,
        }
| ServiceHookSerializer |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/sensors/wasb.py | {
"start": 4326,
"end": 7397
class ____(BaseSensorOperator):
    """
    Wait for blobs matching a prefix to arrive on Azure Blob Storage.

    :param container_name: Name of the container.
    :param prefix: Prefix of the blob.
    :param wasb_conn_id: Reference to the wasb connection.
    :param check_options: Optional keyword arguments that
        `WasbHook.check_for_prefix()` takes.
    :param public_read: whether an anonymous public read access should be used. Default is False
    :param deferrable: Run operator in the deferrable mode.
    """

    template_fields: Sequence[str] = ("container_name", "prefix")

    def __init__(
        self,
        *,
        container_name: str,
        prefix: str,
        wasb_conn_id: str = "wasb_default",
        check_options: dict | None = None,
        public_read: bool = False,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if check_options is None:
            check_options = {}
        self.container_name = container_name
        self.prefix = prefix
        self.wasb_conn_id = wasb_conn_id
        self.check_options = check_options
        self.public_read = public_read
        self.deferrable = deferrable

    def poke(self, context: Context) -> bool:
        """Return True when at least one blob with the prefix exists."""
        self.log.info("Poking for prefix: %s in wasb://%s", self.prefix, self.container_name)
        hook = WasbHook(wasb_conn_id=self.wasb_conn_id, public_read=self.public_read)
        return hook.check_for_prefix(self.container_name, self.prefix, **self.check_options)

    def execute(self, context: Context) -> None:
        """
        Poll for state of the job run.

        In deferrable mode, the polling is deferred to the triggerer. Otherwise
        the sensor waits synchronously.
        """
        if not self.deferrable:
            super().execute(context=context)
        else:
            # Only defer when the prefix is not already present.
            if not self.poke(context=context):
                self.defer(
                    timeout=timedelta(seconds=self.timeout),
                    trigger=WasbPrefixSensorTrigger(
                        container_name=self.container_name,
                        prefix=self.prefix,
                        wasb_conn_id=self.wasb_conn_id,
                        check_options=self.check_options,
                        public_read=self.public_read,
                        poke_interval=self.poke_interval,
                    ),
                    method_name="execute_complete",
                )

    def execute_complete(self, context: Context, event: dict[str, str]) -> None:
        """
        Return immediately - callback for when the trigger fires.

        Relies on trigger to throw an exception, otherwise it assumes execution was successful.
        """
        if event:
            if event["status"] == "error":
                raise AirflowException(event["message"])
            self.log.info(event["message"])
        else:
            raise AirflowException("Did not receive valid event from the triggerer")
| WasbPrefixSensor |
python | xlwings__xlwings | tests/test_range.py | {
"start": 30818,
"end": 31598
class ____(TestBase):
    """Tests for reading worksheet cells that contain Excel error values."""

    def test_cell_errors_default(self):
        # By default every cell error is converted to None.
        wb = xw.Book(Path(this_dir) / "cell_errors.xlsx")
        sheet = wb.sheets[0]
        for i in range(1, 8):
            self.assertIsNone(sheet.range((i, 1)).value)
        wb.close()

    def test_cell_errors_str(self):
        # With err_to_str=True errors come back as their display strings.
        wb = xw.Book(Path(this_dir) / "cell_errors.xlsx")
        sheet = wb.sheets[0]
        # Single cells, since macOS has massive issues with ranges that contain cell
        # errors, see #1028 and #1924
        expected = ["#DIV/0!", "#N/A", "#NAME?", "#NULL!", "#NUM!", "#REF!", "#VALUE!"]
        for i in range(1, 8):
            self.assertEqual(
                sheet.range((i, 1)).options(err_to_str=True).value, expected[i - 1]
            )
        wb.close()
| TestCellErrors |
python | pypa__twine | twine/exceptions.py | {
"start": 5166,
"end": 5298
class ____(TwineException):
    """Raised if we expected to use trusted publishing but couldn't."""
    # NOTE: the docstring alone forms a valid class body, so the previously
    # redundant ``pass`` statement has been removed (idiom cleanup).
| TrustedPublishingFailure |
python | ray-project__ray | rllib/env/tests/test_pettingzoo_env.py | {
"start": 762,
"end": 3904
class ____(unittest.TestCase):
    """End-to-end PPO training smoke tests for PettingZoo envs in RLlib."""

    def setUp(self) -> None:
        ray.init()

    def tearDown(self) -> None:
        ray.shutdown()

    def test_pettingzoo_pistonball_v6_policies_are_dict_env(self):
        def env_creator(config):
            # Build the pistonball env with standard preprocessing wrappers.
            env = pistonball_v6.env()
            env = dtype_v0(env, dtype=float32)
            env = color_reduction_v0(env, mode="R")
            env = normalize_obs_v0(env)
            # add a wrapper to convert the observation space to a 3d array
            env = observation_lambda_v0(env, change_observation, change_obs_space)
            # resize the observation space to 84x84 so that RLlib defauls CNN can
            # process it
            env = resize_v1(env, x_size=84, y_size=84, linear_interp=True)
            return env

        # Register env
        register_env("pistonball", lambda config: PettingZooEnv(env_creator(config)))
        config = (
            PPOConfig()
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
            .environment("pistonball", env_config={"local_ratio": 0.5})
            .multi_agent(
                # Set of policy IDs (by default, will use Algorithms's
                # default policy class, the env's/agent's obs/act spaces and config={}).
                policies={"av"},
                # Map all agents to that policy.
                policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: "av",
            )
            .debugging(log_level="DEBUG")
            .env_runners(
                num_env_runners=1,
                # Fragment length, collected at once from each worker
                # and for each agent!
                rollout_fragment_length=30,
            )
            # Training batch size -> Fragments are concatenated up to this point.
            .training(train_batch_size=200)
        )
        algo = config.build()
        algo.train()
        algo.stop()

    def test_pettingzoo_env(self):
        register_env("simple_spread", lambda _: PettingZooEnv(simple_spread_v3.env()))
        config = (
            PPOConfig()
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
            .environment("simple_spread")
            .env_runners(num_env_runners=0, rollout_fragment_length=30)
            .debugging(log_level="DEBUG")
            .training(train_batch_size=200)
            .multi_agent(
                # Set of policy IDs (by default, will use Algorithm's
                # default policy class, the env's/agent's obs/act spaces and config={}).
                policies={"av"},
                # Mapping function that always returns "av" as policy ID to use
                # (for any agent).
                policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: "av",
            )
        )
        algo = config.build()
        algo.train()
        algo.stop()
if __name__ == "__main__":
    import sys

    import pytest

    # Run this file's tests directly via pytest when executed as a script.
    sys.exit(pytest.main(["-v", __file__]))
python | spack__spack | lib/spack/spack/cmd/info.py | {
"start": 1176,
"end": 4935
class ____:
    """Generic formatter for elements displayed by `spack info`.

    Elements have four parts: name, values, when condition, and description. They can
    be formatted two ways (shown here for variants):

    Grouped by when (default)::

        when +cuda
            cuda_arch [none]    none, 10, 100, 100a, 101,
                                101a, 11, 12, 120, 120a, 13
                CUDA architecture

    Or, by name (each name has a when nested under it)::

        cuda_arch [none]    none, 10, 100, 100a, 101,
                            101a, 11, 12, 120, 120a, 13
            when +cuda
                CUDA architecture

    The values and description will be wrapped if needed. the name (and any additional info)
    will not (so they should be kept short).

    Subclasses are responsible for generating colorized text, but not wrapping,
    indentation, or other formatting, for the name, values, and description.
    """

    def format_name(self, element: Any) -> str:
        # Default: the element's plain string form; subclasses colorize.
        return str(element)

    def format_values(self, element: Any) -> str:
        # Default: no values column.
        return ""

    def format_description(self, element: Any) -> str:
        # Default: no description.
        return ""
def padder(str_list: Iterable, extra: int = 0) -> Callable:
    """Return a function that right-pads strings to a common width.

    The width is the longest string form of any element of *str_list*,
    plus *extra* spaces.  Strings already wider than that are returned
    unchanged.
    """
    width = extra + max(len(str(item)) for item in str_list)

    def pad(string: str) -> str:
        # ljust pads with spaces and leaves longer strings untouched.
        return str(string).ljust(width)

    return pad
def setup_parser(subparser: argparse.ArgumentParser) -> None:
    """Attach the ``spack info`` command-line options to *subparser*."""
    subparser.add_argument(
        "-a", "--all", action="store_true", default=False, help="output all package information"
    )
    # --by-name and --by-when are two mutually exclusive output orderings
    by = subparser.add_mutually_exclusive_group()
    by.add_argument(
        "--by-name",
        dest="by_name",
        action="store_true",
        default=True,
        help="list variants, dependency, etc. in name order, then by when condition",
    )
    by.add_argument(
        "--by-when",
        dest="by_name",
        action="store_false",
        default=False,
        help="group variants, dependencies, etc. first by when condition, then by name",
    )
    # boolean section toggles; help text is taken from each print_* docstring
    options = [
        ("--detectable", print_detectable.__doc__),
        ("--maintainers", print_maintainers.__doc__),
        ("--namespace", print_namespace.__doc__),
        ("--no-dependencies", f"do not {print_dependencies.__doc__}"),
        ("--no-variants", f"do not {print_variants.__doc__}"),
        ("--no-versions", f"do not {print_versions.__doc__}"),
        ("--phases", print_phases.__doc__),
        ("--tags", print_tags.__doc__),
        ("--tests", print_tests.__doc__),
        ("--virtuals", print_virtuals.__doc__),
    ]
    for opt, help_comment in options:
        subparser.add_argument(opt, action="store_true", help=help_comment)
    # deprecated for the more generic --by-name, but still here until we can remove it
    subparser.add_argument(
        "--variants-by-name",
        dest="by_name",
        action=arguments.DeprecatedStoreTrueAction,
        help=argparse.SUPPRESS,
        removed_in="a future Spack release",
        instructions="use --by-name instead",
    )
    arguments.add_common_arguments(subparser, ["spec"])
def section_title(s: str) -> str:
    """Colorize *s* as a section header."""
    return header_color + s + plain_format
def version(s: str) -> str:
    """Colorize *s* as a version string."""
    return spack.spec.VERSION_COLOR + s + plain_format
def format_deptype(depflag: int) -> str:
    """Render a dependency-type bitmask as a colorized, comma-separated list."""
    # one color code per dependency flag, paired positionally with ALL_FLAGS
    color_flags = zip("gcbm", dt.ALL_FLAGS)
    return ", ".join(
        color.colorize(f"@{c}{{{dt.flag_to_string(depflag & flag)}}}")
        for c, flag in color_flags
        if depflag & flag
    )
| Formatter |
python | milvus-io__pymilvus | pymilvus/client/abstract.py | {
"start": 11160,
"end": 13651
class ____:
    """Result of a Milvus mutation (insert/delete/upsert) RPC call."""

    def __init__(self, raw: Any):
        # ``raw`` is the gRPC MutationResult message; _pack() copies the
        # individual fields into plain Python attributes below.
        self._raw = raw
        self._primary_keys = []
        self._insert_cnt = 0
        self._delete_cnt = 0
        self._upsert_cnt = 0
        self._timestamp = 0
        self._succ_index = []
        self._err_index = []
        self._cost = 0
        self._pack(raw)

    @property
    def primary_keys(self):
        """Primary keys of the affected entities."""
        return self._primary_keys

    @property
    def insert_count(self):
        """Number of rows inserted."""
        return self._insert_cnt

    @property
    def delete_count(self):
        """Number of rows deleted."""
        return self._delete_cnt

    @property
    def upsert_count(self):
        """Number of rows upserted."""
        return self._upsert_cnt

    @property
    def timestamp(self):
        """Server timestamp at which the mutation was applied."""
        return self._timestamp

    @property
    def succ_count(self):
        """Number of rows that succeeded."""
        return len(self._succ_index)

    @property
    def err_count(self):
        """Number of rows that failed."""
        return len(self._err_index)

    @property
    def succ_index(self):
        """Indexes of the rows that succeeded."""
        return self._succ_index

    @property
    def err_index(self):
        """Indexes of the rows that failed."""
        return self._err_index

    # The unit of this cost is vcu, similar to token
    @property
    def cost(self):
        return self._cost

    def __str__(self):
        # Include the cost only when the server reported one.
        if self.cost:
            return (
                f"(insert count: {self._insert_cnt}, delete count: {self._delete_cnt}, upsert count: {self._upsert_cnt}, "
                f"timestamp: {self._timestamp}, success count: {self.succ_count}, err count: {self.err_count}, "
                f"cost: {self._cost})"
            )
        return (
            f"(insert count: {self._insert_cnt}, delete count: {self._delete_cnt}, upsert count: {self._upsert_cnt}, "
            f"timestamp: {self._timestamp}, success count: {self.succ_count}, err count: {self.err_count}"
        )

    __repr__ = __str__

    # TODO
    # def error_code(self):
    #     pass
    #
    # def error_reason(self):
    #     pass

    def _pack(self, raw: Any):
        """Copy fields out of the raw protobuf response message."""
        # primary keys are a protobuf oneof: either int64 ids or string ids
        which = raw.IDs.WhichOneof("id_field")
        if which == "int_id":
            self._primary_keys = raw.IDs.int_id.data
        elif which == "str_id":
            self._primary_keys = raw.IDs.str_id.data
        self._insert_cnt = raw.insert_cnt
        self._delete_cnt = raw.delete_cnt
        self._upsert_cnt = raw.upsert_cnt
        self._timestamp = raw.timestamp
        self._succ_index = raw.succ_index
        self._err_index = raw.err_index
        # report_value carries the vcu cost; defaults to "0" when absent
        self._cost = int(
            raw.status.extra_info["report_value"] if raw.status and raw.status.extra_info else "0"
        )
python | getsentry__sentry | src/sentry/overwatch_webhooks/webhook_publisher.py | {
"start": 298,
"end": 1659
class ____:
    """Publishes signed webhook payloads to the Overwatch service."""

    _publisher_client: PublisherClient
    _region: Region
    _integration_provider: str

    def __init__(self, integration_provider: str, region: Region):
        self._integration_provider = integration_provider
        self._region = region

    def enqueue_webhook(self, webhook_details: WebhookDetails):
        """POST the webhook payload to the Overwatch endpoint for this region."""
        base_addr = self._get_request_address()
        # Serialize exactly once so the HMAC signature is guaranteed to be
        # computed over the same bytes that are sent as the request body
        # (previously to_json() was called twice, which was redundant and
        # would break the signature if serialization were not byte-stable).
        body = webhook_details.to_json()
        requests.post(
            f"{base_addr}/webhooks/sentry",
            data=body,
            headers={
                "content-type": "application/json;charset=utf-8",
                "x-sentry-overwatch-signature": self._get_request_signature(body),
            },
        )

    def _get_request_signature(self, body: str) -> str:
        """Return the hex HMAC-SHA256 of *body* keyed with the shared secret.

        :raises ValueError: if ``OVERWATCH_WEBHOOK_SECRET`` is not configured.
        """
        if not (webhook_secret := settings.OVERWATCH_WEBHOOK_SECRET):
            raise ValueError("OVERWATCH_WEBHOOK_SECRET is not set")
        return hmac.new(
            key=webhook_secret.encode("utf-8"),
            msg=body.encode("utf-8"),
            digestmod=hashlib.sha256,
        ).hexdigest()

    def _get_request_address(self) -> str:
        """Return the configured Overwatch base URL for this region.

        :raises ValueError: if no URL is configured for the region.
        """
        addr = settings.OVERWATCH_REGION_URLS.get(self._region.name)
        if not addr:
            raise ValueError(f"Missing overwatch request address for region {self._region.name}")
        return addr
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 202928,
"end": 203030
class ____(
    _NumMultiRangeTests, _MultiRangeTypeCompilation
):
    # All test behavior is inherited from the two mixin base classes.
    pass
| NumMultiRangeCompilationTest |
python | pandas-dev__pandas | pandas/util/_doctools.py | {
"start": 214,
"end": 6911
} | class ____:
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(
self,
cell_width: float = 0.37,
cell_height: float = 0.25,
font_size: float = 7.5,
) -> None:
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df: pd.DataFrame) -> tuple[int, int]:
"""
Calculate table shape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical) -> tuple[int, int]:
"""
Calculate appropriate figure size based on left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0])
hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1]
else:
vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]])
hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]])
return hcells, vcells
def plot(
self, left, right, labels: Iterable[str] = (), vertical: bool = True
) -> Figure:
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool, default True
If True, use vertical layout. If False, use horizontal layout.
"""
from matplotlib import gridspec
import matplotlib.pyplot as plt
if not isinstance(left, list):
left = [left]
left = [self._conv(df) for df in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(df)[1] for df in left)
max_left_rows = max(self._shape(df)[0] for df in left)
for i, (_left, _label) in enumerate(zip(left, labels, strict=True)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title="Result", height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for df, _label in zip(left, labels, strict=True):
sp = self._shape(df)
ax = fig.add_subplot(gs[0, i : i + sp[1]])
self._make_table(ax, df, title=_label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title="Result", height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""
Convert each input to appropriate for table outplot.
"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name="")
else:
data = data.to_frame()
data = data.fillna("NaN")
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, "Index", data.index)
else:
for i in range(idx_nlevels):
data.insert(i, f"Index{i}", data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [
data.columns._get_level_values(i)._values for i in range(1, col_nlevels)
]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title: str, height: float | None = None) -> None:
if df is None:
ax.set_visible(False)
return
from pandas import plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in props["celld"].items():
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor("#AAAAAA")
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis("off")
def main() -> None:
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]})
df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True)
plt.show()
df3 = pd.DataFrame({"X": [10, 12], "Z": [30, 32]})
p.plot(
[df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df2"], vertical=False
)
plt.show()
idx = pd.MultiIndex.from_tuples(
[(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")]
)
column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")])
df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx)
df3.columns = column
p.plot(df3, df3, labels=["df3"])
plt.show()
if __name__ == "__main__":
main()
| TablePlotter |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_period.py | {
"start": 16206,
"end": 19409
} | class ____:
"""Test PeriodIndex and Period Series Ops consistency"""
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = Series(values)
result = func(s)
exp = Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
per = idx[2]
f = lambda x: x == per
exp = np.array([False, False, True, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: per == x
self._check(idx, f, exp)
f = lambda x: x != per
exp = np.array([True, True, False, True], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: per != x
self._check(idx, f, exp)
f = lambda x: per >= x
exp = np.array([True, True, True, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: x > per
exp = np.array([False, False, False, True], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: per >= x
exp = np.array([True, True, True, False], dtype=np.bool_)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
per = idx[2]
f = lambda x: x == per
exp = np.array([False, False, True, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: per == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != per
exp = np.array([True, True, False, True], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: per != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: per >= x
exp = np.array([True, False, True, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: x < per
exp = np.array([True, False, False, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool_)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool_)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
| TestPeriodIndexSeriesComparisonConsistency |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/base.py | {
"start": 5689,
"end": 6567
class ____(
    LoadableBy[AssetCheckKey],
):
    """Summary of the latest check executions for a single asset check key."""

    asset_check_key: AssetCheckKey
    # most recent execution record in any state (may be None)
    last_check_execution_record: Optional[AssetCheckExecutionRecord]
    last_run_id: Optional[str]
    # most recent execution that reached a completed state (may be None)
    last_completed_check_execution_record: Optional[AssetCheckExecutionRecord]

    @classmethod
    def _blocking_batch_load(
        cls, keys: Iterable[AssetCheckKey], context: LoadingContext
    ) -> Iterable[Optional["AssetCheckSummaryRecord"]]:
        # Fetch all summaries in one storage round-trip, preserving key order.
        records_by_key = context.instance.event_log_storage.get_asset_check_summary_records(
            list(keys)
        )
        return [records_by_key[key] for key in keys]

    @property
    def last_completed_run_id(self) -> Optional[str]:
        # Run id of the last completed execution, or None when there is none.
        return (
            self.last_completed_check_execution_record.run_id
            if self.last_completed_check_execution_record
            else None
        )
@record
| AssetCheckSummaryRecord |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 1943,
"end": 2791
class ____(graphene.ObjectType):
    """GraphQL object type mirroring the MLflow ModelVersion message."""

    name = graphene.String()
    version = graphene.String()
    # timestamps use LongString to avoid 32-bit GraphQL Int overflow
    creation_timestamp = LongString()
    last_updated_timestamp = LongString()
    user_id = graphene.String()
    current_stage = graphene.String()
    description = graphene.String()
    source = graphene.String()
    run_id = graphene.String()
    status = graphene.Field(MlflowModelVersionStatus)
    status_message = graphene.String()
    tags = graphene.List(graphene.NonNull(MlflowModelVersionTag))
    run_link = graphene.String()
    aliases = graphene.List(graphene.String)
    model_id = graphene.String()
    model_params = graphene.List(graphene.NonNull(MlflowModelParam))
    model_metrics = graphene.List(graphene.NonNull(MlflowModelMetric))
    deployment_job_state = graphene.Field(MlflowModelVersionDeploymentJobState)
python | getsentry__sentry | src/sentry/core/endpoints/team_members.py | {
"start": 1082,
"end": 1408
class ____(OrganizationMemberResponse):
    """Serialized organization member as scoped to a specific team."""

    # NOTE: We override users to be required b/c team members will always have
    # an existing user to be part of a team.
    user: UserSerializerResponse  # type: ignore[misc]
    teamRole: str | None
    teamSlug: str
@register(OrganizationMemberTeam)
| OrganizationMemberOnTeamResponse |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/models.py | {
"start": 861,
"end": 1009
class ____(Enum):
    """The status of a QA check"""

    # values are the user-facing report labels, including the emoji marker
    PASSED = "✅ Passed"
    FAILED = "❌ Failed"
    SKIPPED = "🔶 Skipped"
| CheckStatus |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassHash1.py | {
"start": 264,
"end": 332
class ____:
    a: int
    # default references a DC2 instance defined earlier in this sample file
    v2: Hashable = DC2(0)
@dataclass(eq=True)
| DC2 |
python | huggingface__transformers | src/transformers/models/resnet/modeling_resnet.py | {
"start": 1251,
"end": 1999
class ____(nn.Module):
    """Basic ResNet building block: convolution -> batch norm -> activation."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # "same"-style padding (kernel_size // 2) preserves spatial size for
        # odd kernels; bias is omitted because batch norm follows.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = nn.Identity() if activation is None else ACT2FN[activation]

    def forward(self, input: Tensor) -> Tensor:
        # conv -> norm -> activation, expressed as one pipeline
        return self.activation(self.normalization(self.convolution(input)))
| ResNetConvLayer |
python | apache__airflow | providers/teradata/tests/unit/teradata/operators/test_bteq.py | {
"start": 1073,
"end": 11281
} | class ____:
@mock.patch.object(BteqHook, "execute_bteq_script")
@mock.patch.object(BteqHook, "__init__", return_value=None)
def test_execute(self, mock_hook_init, mock_execute_bteq):
task_id = "test_bteq_operator"
sql = "SELECT * FROM my_table;"
teradata_conn_id = "teradata_default"
mock_context = {}
# Given
expected_result = "BTEQ execution result"
mock_execute_bteq.return_value = expected_result
operator = BteqOperator(
task_id=task_id,
sql=sql,
teradata_conn_id=teradata_conn_id,
)
# When
result = operator.execute(mock_context)
# Then
mock_hook_init.assert_called_once_with(teradata_conn_id=teradata_conn_id, ssh_conn_id=None)
mock_execute_bteq.assert_called_once_with(sql + "\n.EXIT", None, "", 600, None, "", None, "UTF-8")
assert result == "BTEQ execution result"
@mock.patch.object(BteqHook, "execute_bteq_script")
@mock.patch.object(BteqHook, "__init__", return_value=None)
def test_execute_sql_only(self, mock_hook_init, mock_execute_bteq):
# Arrange
task_id = "test_bteq_operator"
sql = "SELECT * FROM my_table;"
teradata_conn_id = "teradata_default"
mock_context = {}
expected_result = "BTEQ execution result"
mock_execute_bteq.return_value = expected_result
operator = BteqOperator(
task_id=task_id,
sql=sql,
teradata_conn_id=teradata_conn_id,
)
# Manually set _hook since we bypassed __init__
operator._hook = mock.MagicMock()
operator._hook.execute_bteq_script = mock_execute_bteq
# Act
result = operator.execute(mock_context)
# Assert
mock_hook_init.assert_called_once_with(teradata_conn_id=teradata_conn_id, ssh_conn_id=None)
mock_execute_bteq.assert_called_once_with(
sql + "\n.EXIT", # Assuming the prepare_bteq_script_for_local_execution appends ".EXIT"
None, # default remote_working_dir
"", # bteq_script_encoding (default ASCII => empty string)
600, # timeout default
None, # timeout_rc
"", # bteq_session_encoding
None, # bteq_quit_rc
"UTF-8",
)
assert result == expected_result
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.execute_bteq_script")
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.__init__", return_value=None)
def test_execute_sql_local(self, mock_hook_init, mock_execute_script):
sql = "SELECT * FROM test_table;"
expected_result = 0
mock_execute_script.return_value = expected_result
context = {}
op = BteqOperator(
task_id="test_local_sql",
sql=sql,
teradata_conn_id="td_conn",
)
op._hook = mock.Mock()
op._hook.execute_bteq_script = mock_execute_script
result = op.execute(context)
mock_hook_init.assert_called_once_with(teradata_conn_id="td_conn", ssh_conn_id=None)
mock_execute_script.assert_called_once()
assert result == expected_result
@mock.patch.object(BteqHook, "on_kill")
def test_on_kill(self, mock_on_kill):
task_id = "test_bteq_operator"
sql = "SELECT * FROM my_table;"
# Given
operator = BteqOperator(
task_id=task_id,
sql=sql,
)
operator._hook = BteqHook(None)
# When
operator.on_kill()
# Then
mock_on_kill.assert_called_once()
def test_on_kill_not_initialized(self):
task_id = "test_bteq_operator"
sql = "SELECT * FROM my_table;"
# Given
operator = BteqOperator(
task_id=task_id,
sql=sql,
)
operator._hook = None
# When/Then (no exception should be raised)
operator.on_kill()
def test_template_fields(self):
# Verify template fields are defined correctly
print(BteqOperator.template_fields)
assert BteqOperator.template_fields == "sql"
def test_execute_raises_if_no_sql_or_file(self):
op = BteqOperator(task_id="fail_case", teradata_conn_id="td_conn")
with pytest.raises(
ValueError,
match="Failed to execute BTEQ script due to missing required parameters: either 'sql' or 'file_path' must be provided.",
):
op.execute({})
@mock.patch("airflow.providers.teradata.operators.bteq.is_valid_file", return_value=False)
def test_invalid_file_path(self, mock_is_valid_file):
op = BteqOperator(
task_id="fail_invalid_file",
file_path="/invalid/path.sql",
teradata_conn_id="td_conn",
)
with pytest.raises(ValueError, match="Failed to execute BTEQ script due to invalid file path"):
op.execute({})
@mock.patch("airflow.providers.teradata.operators.bteq.is_valid_file", return_value=True)
@mock.patch(
"airflow.providers.teradata.operators.bteq.is_valid_encoding",
side_effect=UnicodeDecodeError("utf8", b"", 0, 1, "error"),
)
def test_file_encoding_error(self, mock_encoding, mock_valid_file):
op = BteqOperator(
task_id="encoding_fail",
file_path="/tmp/test.sql",
bteq_script_encoding="UTF-8",
teradata_conn_id="td_conn",
)
with pytest.raises(
ValueError,
match="Failed to execute BTEQ script because the provided file.*encoding differs from the specified BTEQ I/O encoding",
):
op.execute({})
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.execute_bteq_script")
@mock.patch("airflow.providers.teradata.operators.bteq.is_valid_file", return_value=True)
@mock.patch("airflow.providers.teradata.operators.bteq.is_valid_encoding")
@mock.patch("airflow.providers.teradata.operators.bteq.read_file")
def test_execute_local_file(
self,
mock_read_file,
mock_valid_encoding,
mock_valid_file,
mock_execute_bteq_script,
):
mock_execute_bteq_script.return_value = 0
sql_content = "SELECT * FROM table_name;"
mock_read_file.return_value = sql_content
with tempfile.NamedTemporaryFile("w+", suffix=".sql", delete=False) as tmp_file:
tmp_file.write(sql_content)
tmp_file_path = tmp_file.name
op = BteqOperator(
task_id="test_bteq_local_file",
file_path=tmp_file_path,
teradata_conn_id="teradata_default",
)
result = op.execute(context={})
assert result == 0
mock_execute_bteq_script.assert_called_once()
def test_on_kill_calls_hook(self):
op = BteqOperator(task_id="kill_test", teradata_conn_id="td_conn")
op._hook = mock.Mock()
op.on_kill()
op._hook.on_kill.assert_called_once()
def test_on_kill_logs_if_no_hook(self):
op = BteqOperator(task_id="kill_no_hook", teradata_conn_id="td_conn")
op._hook = None
with mock.patch.object(op.log, "warning") as mock_log_info:
op.on_kill()
mock_log_info.assert_called_once_with("BteqHook was not initialized. Nothing to terminate.")
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.execute_bteq_script")
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.get_conn")
@mock.patch("airflow.providers.teradata.operators.bteq.SSHHook")
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.__init__", return_value=None)
def test_remote_execution_with_sql(
self,
mock_bteq_hook_init,
mock_ssh_hook_class,
mock_get_conn,
mock_execute_bteq_script,
):
mock_execute_bteq_script.return_value = 0
mock_ssh_hook_instance = mock.Mock()
mock_ssh_hook_class.return_value = mock_ssh_hook_instance
op = BteqOperator(
task_id="test_remote_sql",
sql="SELECT * FROM customers;",
ssh_conn_id="ssh_default",
teradata_conn_id="teradata_default",
)
result = op.execute(context={})
mock_bteq_hook_init.assert_called_once_with(
teradata_conn_id="teradata_default", ssh_conn_id="ssh_default"
)
mock_execute_bteq_script.assert_called_once()
assert result == 0
@mock.patch("airflow.providers.common.compat.sdk.BaseOperator.render_template")
def test_render_template_in_sql(self, mock_render):
op = BteqOperator(task_id="render_test", sql="SELECT * FROM {{ params.table }};")
mock_render.return_value = "SELECT * FROM my_table;"
rendered_sql = op.render_template("sql", op.sql, context={"params": {"table": "my_table"}})
assert rendered_sql == "SELECT * FROM my_table;"
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.execute_bteq_script", return_value=99)
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.__init__", return_value=None)
def test_bteq_timeout_with_custom_rc(self, mock_hook_init, mock_exec):
op = BteqOperator(
task_id="timeout_case",
sql="SELECT 1",
teradata_conn_id="td_conn",
timeout=30,
timeout_rc=99,
bteq_quit_rc=[99],
)
result = op.execute({})
assert result == 99
mock_exec.assert_called_once()
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.execute_bteq_script", return_value=42)
@mock.patch("airflow.providers.teradata.operators.bteq.BteqHook.__init__", return_value=None)
def test_bteq_return_code_not_in_quit_rc(self, mock_hook_init, mock_exec):
op = BteqOperator(
task_id="rc_not_allowed", sql="SELECT 1", teradata_conn_id="td_conn", bteq_quit_rc=[0, 1]
)
result = op.execute({})
assert result == 42 # still returns, but caller can fail on RC if desired
if __name__ == "__main__":
unittest.main()
| TestBteqOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol2.py | {
"start": 394,
"end": 504
} | class ____:
def __call__(self, inputs) -> int:
return 5
g1: MyCallable[int, int] = Class1()
| Class1 |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 37480,
"end": 45987
} | class ____(InstructBlipPreTrainedModel):
main_input_name = "pixel_values"
_keep_in_fp32_modules = ["query_tokens"] # TODO @ArthurZucker I don't know why this is required for FP8
def __init__(self, config: InstructBlipConfig):
super().__init__(config)
self.vision_model = InstructBlipVisionModel(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = InstructBlipQFormerModel(config.qformer_config)
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
self.language_model = AutoModel.from_config(config.text_config)
if self.language_model._no_split_modules is not None:
self._no_split_modules.extend(self.language_model._no_split_modules)
if self.language_model._keep_in_fp32_modules is not None:
self._keep_in_fp32_modules.extend(self.language_model._keep_in_fp32_modules)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def _preprocess_accelerate(self):
r"""
Some pre-processing hacks to make the model `accelerate` compatible. Check
https://github.com/huggingface/transformers/pull/21707 for more details.
"""
hf_device_map = self.hf_device_map
if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
# warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`.
logger.warning(
"The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
" in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
" Please pass a `device_map` that contains `language_model` to remove this warning."
" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
" more details on creating a `device_map` for large models.",
)
if hasattr(self.language_model, "_hf_hook"):
self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
qformer_input_ids: torch.FloatTensor,
qformer_attention_mask: Optional[torch.LongTensor] = None,
input_ids: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]:
r"""
qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
to serve as text prompt, which the Q-Former model will encode.
Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Only relevant in case an encoder-decoder language model (like T5) is used.
"""
# step 1: forward the images through the vision encoder,
# to get image embeddings of shape (batch_size, seq_len, hidden_size)
vision_outputs = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
**kwargs,
)
image_embeds = vision_outputs[0]
# step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
# difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(
input_ids=qformer_input_ids,
attention_mask=qformer_attention_mask,
query_embeds=query_tokens,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
**kwargs,
)
query_output = query_outputs[0][:, : query_tokens.size(1), :]
if inputs_embeds is None:
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
# step 3: use the language model, conditioned on the query outputs and the prompt
language_model_inputs = self.language_projection(query_output)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
**kwargs,
)
else:
outputs = self.language_model(
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
**kwargs,
)
return InstructBlipForConditionalGenerationModelOutput(
vision_outputs=vision_outputs,
qformer_outputs=query_outputs,
language_model_outputs=outputs,
)
@auto_docstring(
custom_intro="""
InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision
encoder, Querying Transformer (Q-Former) and a language model.
One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
"""
)
| InstructBlipModel |
python | ipython__ipython | IPython/core/tbtools.py | {
"start": 10678,
"end": 16880
} | class ____:
"""Basic tools used by all traceback printer classes."""
# Number of frames to skip when reporting tracebacks
tb_offset = 0
_theme_name: str
_old_theme_name: str
call_pdb: bool
ostream: Any
debugger_cls: Any
pdb: Any
def __init__(
self,
color_scheme: Any = _sentinel,
call_pdb: bool = False,
ostream: Any = None,
*,
debugger_cls: type | None = None,
theme_name: str = "nocolor",
):
if color_scheme is not _sentinel:
assert isinstance(color_scheme, str), color_scheme
warnings.warn(
"color_scheme is deprecated since IPython 9.0, use theme_name instead, all lowercase",
DeprecationWarning,
stacklevel=2,
)
theme_name = color_scheme
if theme_name in ["Linux", "LightBG", "Neutral", "NoColor"]:
warnings.warn(
f"Theme names and color schemes are lowercase in IPython 9.0 use {theme_name.lower()} instead",
DeprecationWarning,
stacklevel=2,
)
theme_name = theme_name.lower()
# Whether to call the interactive pdb debugger after printing
# tracebacks or not
super().__init__()
self.call_pdb = call_pdb
# Output stream to write to. Note that we store the original value in
# a private attribute and then make the public ostream a property, so
# that we can delay accessing sys.stdout until runtime. The way
# things are written now, the sys.stdout object is dynamically managed
# so a reference to it should NEVER be stored statically. This
# property approach confines this detail to a single location, and all
# subclasses can simply access self.ostream for writing.
self._ostream = ostream
# Create color table
self.set_theme_name(theme_name)
self.debugger_cls = debugger_cls or debugger.Pdb
if call_pdb:
self.pdb = self.debugger_cls()
else:
self.pdb = None
def _get_ostream(self) -> Any:
"""Output stream that exceptions are written to.
Valid values are:
- None: the default, which means that IPython will dynamically resolve
to sys.stdout. This ensures compatibility with most tools, including
Windows (where plain stdout doesn't recognize ANSI escapes).
- Any object with 'write' and 'flush' attributes.
"""
return sys.stdout if self._ostream is None else self._ostream
def _set_ostream(self, val) -> None: # type:ignore[no-untyped-def]
assert val is None or (hasattr(val, "write") and hasattr(val, "flush"))
self._ostream = val
ostream = property(_get_ostream, _set_ostream)
@staticmethod
def _get_chained_exception(exception_value: Any) -> Any:
cause = getattr(exception_value, "__cause__", None)
if cause:
return cause
if getattr(exception_value, "__suppress_context__", False):
return None
return getattr(exception_value, "__context__", None)
def get_parts_of_chained_exception(
self, evalue: BaseException | None
) -> Optional[Tuple[type, BaseException, TracebackType]]:
chained_evalue = self._get_chained_exception(evalue)
if chained_evalue:
return (
chained_evalue.__class__,
chained_evalue,
chained_evalue.__traceback__,
)
return None
def prepare_chained_exception_message(
self, cause: BaseException | None
) -> list[list[str]]:
direct_cause = (
"\nThe above exception was the direct cause of the following exception:\n"
)
exception_during_handling = (
"\nDuring handling of the above exception, another exception occurred:\n"
)
if cause:
message = [[direct_cause]]
else:
message = [[exception_during_handling]]
return message
@property
def has_colors(self) -> bool:
assert self._theme_name == self._theme_name.lower()
return self._theme_name != "nocolor"
def set_theme_name(self, name: str) -> None:
assert name in theme_table
assert name.lower() == name
self._theme_name = name
# Also set colors of debugger
if hasattr(self, "pdb") and self.pdb is not None:
self.pdb.set_theme_name(name)
def set_colors(self, name: str) -> None:
"""Shorthand access to the color table scheme selector method."""
# todo emit deprecation
warnings.warn(
"set_colors is deprecated since IPython 9.0, use set_theme_name instead",
DeprecationWarning,
stacklevel=2,
)
self.set_theme_name(name)
def color_toggle(self) -> None:
"""Toggle between the currently active color scheme and nocolor."""
if self._theme_name == "nocolor":
self._theme_name = self._old_theme_name
else:
self._old_theme_name = self._theme_name
self._theme_name = "nocolor"
def stb2text(self, stb: list[str]) -> str:
"""Convert a structured traceback (a list) to a string."""
return "\n".join(stb)
def text(
self,
etype: type,
value: BaseException | None,
tb: TracebackType | None,
tb_offset: Optional[int] = None,
context: int = 5,
) -> str:
"""Return formatted traceback.
Subclasses may override this if they add extra arguments.
"""
tb_list = self.structured_traceback(etype, value, tb, tb_offset, context)
return self.stb2text(tb_list)
def structured_traceback(
self,
etype: type,
evalue: BaseException | None,
etb: Optional[TracebackType] = None,
tb_offset: Optional[int] = None,
context: int = 5,
) -> list[str]:
"""Return a list of traceback frames.
Must be implemented by each class.
"""
raise NotImplementedError()
| TBTools |
python | openai__openai-python | src/openai/types/beta/realtime/transcription_session.py | {
"start": 2275,
"end": 3184
} | class ____(BaseModel):
client_secret: ClientSecret
"""Ephemeral key returned by the API.
Only present when the session is created on the server via REST API.
"""
input_audio_format: Optional[str] = None
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
input_audio_transcription: Optional[InputAudioTranscription] = None
"""Configuration of the transcription model."""
modalities: Optional[List[Literal["text", "audio"]]] = None
"""The set of modalities the model can respond with.
To disable audio, set this to ["text"].
"""
turn_detection: Optional[TurnDetection] = None
"""Configuration for turn detection.
Can be set to `null` to turn off. Server VAD means that the model will detect
the start and end of speech based on audio volume and respond at the end of user
speech.
"""
| TranscriptionSession |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 9802,
"end": 12226
} | class ____(nn.Module):
"""
Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in the DPT paper's
supplementary material). The final activation function is either ReLU or Sigmoid, depending on the depth estimation
type (relative or metric). For metric depth estimation, the output is scaled by the maximum depth used during pretraining.
"""
def __init__(self, config):
super().__init__()
self.head_in_index = config.head_in_index
self.patch_size = config.patch_size
features = config.fusion_hidden_size
self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(features // 2, config.head_hidden_size, kernel_size=3, stride=1, padding=1)
self.activation1 = nn.ReLU()
self.conv3 = nn.Conv2d(config.head_hidden_size, 1, kernel_size=1, stride=1, padding=0)
if config.depth_estimation_type == "relative":
self.activation2 = nn.ReLU()
elif config.depth_estimation_type == "metric":
self.activation2 = nn.Sigmoid()
else:
raise ValueError(f"Unknown depth estimation type: {config.depth_estimation_type}")
self.max_depth = config.max_depth
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> torch.Tensor:
hidden_states = hidden_states[self.head_in_index]
predicted_depth = self.conv1(hidden_states)
predicted_depth = nn.functional.interpolate(
predicted_depth,
(int(patch_height * self.patch_size), int(patch_width * self.patch_size)),
mode="bilinear",
align_corners=True,
)
predicted_depth = self.conv2(predicted_depth)
predicted_depth = self.activation1(predicted_depth)
predicted_depth = self.conv3(predicted_depth)
predicted_depth = self.activation2(predicted_depth) * self.max_depth
predicted_depth = predicted_depth.squeeze(dim=1) # shape (batch_size, height, width)
return predicted_depth
@auto_docstring(
custom_intro="""
Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
"""
)
| DepthAnythingDepthEstimationHead |
python | openai__openai-python | src/openai/_models.py | {
"start": 1534,
"end": 1610
} | class ____(Protocol):
allow_population_by_field_name: bool
| _ConfigProtocol |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.