language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 1032,
"end": 1292
} | class ____(types.Type):
np_dtype: np.dtype[np.object_] = np.dtype("object")
def __init__(self):
super().__init__(name="string_view")
@property
def return_as(self):
return ManagedUDFString()
@register_model(StringView)
| StringView |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_glue.py | {
"start": 1470,
"end": 1701
} | class ____:
@pytest.fixture(autouse=True)
def mock_conn(self, monkeypatch):
self.client = boto3.client("glue")
monkeypatch.setattr(GlueDataQualityHook, "conn", self.client)
| TestGlueDataQualityCustomWaitersBase |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/activation_functions.py | {
"start": 287,
"end": 533
} | class ____():
def __call__(self, x):
e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
return e_x / np.sum(e_x, axis=-1, keepdims=True)
def gradient(self, x):
p = self.__call__(x)
return p * (1 - p)
| Softmax |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 148011,
"end": 158478
} | class ____(Runnable[Input, Output]):
"""`Runnable` that runs a generator function.
`RunnableGenerator`s can be instantiated directly or by using a generator within
a sequence.
`RunnableGenerator`s can be used to implement custom behavior, such as custom
output parsers, while preserving streaming capabilities. Given a generator function
with a signature `Iterator[A] -> Iterator[B]`, wrapping it in a
`RunnableGenerator` allows it to emit output chunks as soon as they are streamed
in from the previous step.
!!! note
If a generator function has a `signature A -> Iterator[B]`, such that it
requires its input from the previous step to be completed before emitting chunks
(e.g., most LLMs need the entire prompt available to start generating), it can
instead be wrapped in a `RunnableLambda`.
Here is an example to show the basic mechanics of a `RunnableGenerator`:
```python
from typing import Any, AsyncIterator, Iterator
from langchain_core.runnables import RunnableGenerator
def gen(input: Iterator[Any]) -> Iterator[str]:
for token in ["Have", " a", " nice", " day"]:
yield token
runnable = RunnableGenerator(gen)
runnable.invoke(None) # "Have a nice day"
list(runnable.stream(None)) # ["Have", " a", " nice", " day"]
runnable.batch([None, None]) # ["Have a nice day", "Have a nice day"]
# Async version:
async def agen(input: AsyncIterator[Any]) -> AsyncIterator[str]:
for token in ["Have", " a", " nice", " day"]:
yield token
runnable = RunnableGenerator(agen)
await runnable.ainvoke(None) # "Have a nice day"
[p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"]
```
`RunnableGenerator` makes it easy to implement custom behavior within a streaming
context. Below we show an example:
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableGenerator, RunnableLambda
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
model = ChatOpenAI()
chant_chain = (
ChatPromptTemplate.from_template("Give me a 3 word chant about {topic}")
| model
| StrOutputParser()
)
def character_generator(input: Iterator[str]) -> Iterator[str]:
for token in input:
if "," in token or "." in token:
yield "👏" + token
else:
yield token
runnable = chant_chain | character_generator
assert type(runnable.last) is RunnableGenerator
"".join(runnable.stream({"topic": "waste"})) # Reduce👏, Reuse👏, Recycle👏.
# Note that RunnableLambda can be used to delay streaming of one step in a
# sequence until the previous step is finished:
def reverse_generator(input: str) -> Iterator[str]:
# Yield characters of input in reverse order.
for character in input[::-1]:
yield character
runnable = chant_chain | RunnableLambda(reverse_generator)
"".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR"
```
"""
def __init__(
self,
transform: Callable[[Iterator[Input]], Iterator[Output]]
| Callable[[AsyncIterator[Input]], AsyncIterator[Output]],
atransform: Callable[[AsyncIterator[Input]], AsyncIterator[Output]]
| None = None,
*,
name: str | None = None,
) -> None:
"""Initialize a `RunnableGenerator`.
Args:
transform: The transform function.
atransform: The async transform function.
name: The name of the `Runnable`.
Raises:
TypeError: If the transform is not a generator function.
"""
if atransform is not None:
self._atransform = atransform
func_for_name: Callable = atransform
if is_async_generator(transform):
self._atransform = transform
func_for_name = transform
elif inspect.isgeneratorfunction(transform):
self._transform = transform
func_for_name = transform
else:
msg = (
"Expected a generator function type for `transform`."
f"Instead got an unsupported type: {type(transform)}"
)
raise TypeError(msg)
try:
self.name = name or func_for_name.__name__
except AttributeError:
self.name = "RunnableGenerator"
@property
@override
def InputType(self) -> Any:
func = getattr(self, "_transform", None) or self._atransform
try:
params = inspect.signature(func).parameters
first_param = next(iter(params.values()), None)
if first_param and first_param.annotation != inspect.Parameter.empty:
return getattr(first_param.annotation, "__args__", (Any,))[0]
except ValueError:
pass
return Any
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
# Override the default implementation.
# For a runnable generator, we need to bring to provide the
# module of the underlying function when creating the model.
root_type = self.InputType
func = getattr(self, "_transform", None) or self._atransform
module = getattr(func, "__module__", None)
if (
inspect.isclass(root_type)
and not isinstance(root_type, GenericAlias)
and issubclass(root_type, BaseModel)
):
return root_type
return create_model_v2(
self.get_name("Input"),
root=root_type,
# To create the schema, we need to provide the module
# where the underlying function is defined.
# This allows pydantic to resolve type annotations appropriately.
module_name=module,
)
@property
@override
def OutputType(self) -> Any:
func = getattr(self, "_transform", None) or self._atransform
try:
sig = inspect.signature(func)
return (
getattr(sig.return_annotation, "__args__", (Any,))[0]
if sig.return_annotation != inspect.Signature.empty
else Any
)
except ValueError:
return Any
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
# Override the default implementation.
# For a runnable generator, we need to bring to provide the
# module of the underlying function when creating the model.
root_type = self.OutputType
func = getattr(self, "_transform", None) or self._atransform
module = getattr(func, "__module__", None)
if (
inspect.isclass(root_type)
and not isinstance(root_type, GenericAlias)
and issubclass(root_type, BaseModel)
):
return root_type
return create_model_v2(
self.get_name("Output"),
root=root_type,
# To create the schema, we need to provide the module
# where the underlying function is defined.
# This allows pydantic to resolve type annotations appropriately.
module_name=module,
)
@override
def __eq__(self, other: object) -> bool:
if isinstance(other, RunnableGenerator):
if hasattr(self, "_transform") and hasattr(other, "_transform"):
return self._transform == other._transform
if hasattr(self, "_atransform") and hasattr(other, "_atransform"):
return self._atransform == other._atransform
return False
return False
__hash__ = None # type: ignore[assignment]
@override
def __repr__(self) -> str:
return f"RunnableGenerator({self.name})"
@override
def transform(
self,
input: Iterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Iterator[Output]:
if not hasattr(self, "_transform"):
msg = f"{self!r} only supports async methods."
raise NotImplementedError(msg)
return self._transform_stream_with_config(
input,
self._transform, # type: ignore[arg-type]
config,
**kwargs,
)
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Iterator[Output]:
return self.transform(iter([input]), config, **kwargs)
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
final: Output | None = None
for output in self.stream(input, config, **kwargs):
final = output if final is None else final + output # type: ignore[operator]
return cast("Output", final)
@override
def atransform(
self,
input: AsyncIterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[Output]:
if not hasattr(self, "_atransform"):
msg = f"{self!r} only supports sync methods."
raise NotImplementedError(msg)
return self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
)
@override
def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[Output]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
return self.atransform(input_aiter(), config, **kwargs)
@override
async def ainvoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
final: Output | None = None
async for output in self.astream(input, config, **kwargs):
final = output if final is None else final + output # type: ignore[operator]
return cast("Output", final)
| RunnableGenerator |
python | langchain-ai__langchain | libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py | {
"start": 10586,
"end": 18429
} | class ____(Chain, RunEvaluator):
"""Evaluate Run and optional examples."""
run_mapper: StringRunMapper
"""Maps the Run to a dictionary with 'input' and 'prediction' strings."""
example_mapper: StringExampleMapper | None = None
"""Maps the Example (dataset row) to a dictionary
with a 'reference' string."""
name: str
"""The name of the evaluation metric."""
string_evaluator: StringEvaluator
"""The evaluation chain."""
@property
@override
def input_keys(self) -> list[str]:
return ["run", "example"]
@property
@override
def output_keys(self) -> list[str]:
return ["feedback"]
def _prepare_input(self, inputs: dict[str, Any]) -> dict[str, str]:
run: Run = inputs["run"]
example: Example | None = inputs.get("example")
evaluate_strings_inputs = self.run_mapper(run)
if not self.string_evaluator.requires_input:
# Hide warning about unused input
evaluate_strings_inputs.pop("input", None)
if example and self.example_mapper and self.string_evaluator.requires_reference:
evaluate_strings_inputs.update(self.example_mapper(example))
elif self.string_evaluator.requires_reference:
msg = (
f"Evaluator {self.name} requires an reference"
" example from the dataset,"
f" but none was provided for run {run.id}."
)
raise ValueError(msg)
return evaluate_strings_inputs
def _prepare_output(self, output: dict[str, Any]) -> dict[str, Any]:
evaluation_result = EvaluationResult(
key=self.name,
comment=output.get("reasoning"),
**output,
)
if RUN_KEY in output:
# TODO: Not currently surfaced. Update
evaluation_result.evaluator_info[RUN_KEY] = output[RUN_KEY]
return {"feedback": evaluation_result}
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = self.string_evaluator.evaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
async def _acall(
self,
inputs: dict[str, str],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = await self.string_evaluator.aevaluate_strings(
**evaluate_strings_inputs,
callbacks=callbacks,
include_run_info=True,
)
return self._prepare_output(chain_output)
def _prepare_evaluator_output(self, output: dict[str, Any]) -> EvaluationResult:
feedback: EvaluationResult = output["feedback"]
if RUN_KEY not in feedback.evaluator_info:
feedback.evaluator_info[RUN_KEY] = output[RUN_KEY]
return feedback
@override
def evaluate_run(
self,
run: Run,
example: Example | None = None,
evaluator_run_id: uuid.UUID | None = None,
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = self({"run": run, "example": example}, include_run_info=True)
return self._prepare_evaluator_output(result)
except Exception as e:
_logger.exception("Error evaluating run %s", run.id)
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
# TODO: Add run ID once we can declare it via callbacks
)
@override
async def aevaluate_run(
self,
run: Run,
example: Example | None = None,
evaluator_run_id: uuid.UUID | None = None,
) -> EvaluationResult:
"""Evaluate an example."""
try:
result = await self.acall(
{"run": run, "example": example},
include_run_info=True,
)
return self._prepare_evaluator_output(result)
except Exception as e:
_logger.exception("Error evaluating run %s", run.id)
return EvaluationResult(
key=self.string_evaluator.evaluation_name,
comment=f"Error evaluating run {run.id}: {e}",
)
@classmethod
def from_run_and_data_type(
cls,
evaluator: StringEvaluator,
run_type: str,
data_type: DataType,
input_key: str | None = None,
prediction_key: str | None = None,
reference_key: str | None = None,
tags: list[str] | None = None,
) -> StringRunEvaluatorChain:
"""Create a StringRunEvaluatorChain.
Create a StringRunEvaluatorChain from an evaluator and the run and dataset
types.
This method provides an easy way to instantiate a StringRunEvaluatorChain, by
taking an evaluator and information about the type of run and the data.
The method supports LLM and chain runs.
Args:
evaluator: The string evaluator to use.
run_type: The type of run being evaluated.
Supported types are LLM and Chain.
data_type: The type of dataset used in the run.
input_key: The key used to map the input from the run.
prediction_key: The key used to map the prediction from the run.
reference_key: The key used to map the reference from the dataset.
tags: List of tags to attach to the evaluation chain.
Returns:
The instantiated evaluation chain.
Raises:
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
"""
# Configure how run inputs/predictions are passed to the evaluator
if run_type == "llm":
run_mapper: StringRunMapper = LLMStringRunMapper()
elif run_type == "chain":
run_mapper = ChainStringRunMapper(
input_key=input_key,
prediction_key=prediction_key,
)
else:
msg = f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'."
raise ValueError(msg)
# Configure how example rows are fed as a reference string to the evaluator
if (
reference_key is not None
or data_type in (DataType.llm, DataType.chat)
or evaluator.requires_reference
):
example_mapper = StringExampleMapper(reference_key=reference_key)
elif evaluator.requires_reference:
msg = ( # type: ignore[unreachable]
f"Evaluator {evaluator.evaluation_name} requires a reference"
" example from the dataset. Please specify the reference key from"
" amongst the dataset outputs keys."
)
raise ValueError(msg)
else:
example_mapper = None
return cls(
name=evaluator.evaluation_name,
run_mapper=run_mapper,
example_mapper=example_mapper,
string_evaluator=evaluator,
tags=tags,
)
| StringRunEvaluatorChain |
python | google__jax | jax/_src/literals.py | {
"start": 1259,
"end": 1595
} | class ____(float):
dtype: np.dtype
def __new__(cls, value: float, dtype: np.dtype):
v = super(TypedFloat, cls).__new__(cls, value)
v.dtype = dtype
return v
def __repr__(self):
return f'TypedFloat({float(self)}, dtype={self.dtype.name})'
def __getnewargs__(self):
return (float(self), self.dtype)
| TypedFloat |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 85579,
"end": 86068
} | class ____(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`"""
def __init__(self) -> None:
super().__init__()
self.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.fc(x)
return self.dequant(x)
| QuantStubModel |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 35443,
"end": 41256
} | class ____(collections.abc.Mapping):
"""Represent a collection of edges (DependencySpec objects) in the DAG.
Objects of this class are used in Specs to track edges that are
outgoing towards direct dependencies, or edges that are incoming
from direct dependents.
Edges are stored in a dictionary and keyed by package name.
"""
__slots__ = "edges", "store_by_child"
def __init__(self, store_by_child: bool = True) -> None:
self.edges: Dict[str, List[DependencySpec]] = {}
self.store_by_child = store_by_child
def __getitem__(self, key: str) -> List[DependencySpec]:
return self.edges[key]
def __iter__(self):
return iter(self.edges)
def __len__(self) -> int:
return len(self.edges)
def add(self, edge: DependencySpec) -> None:
key = edge.spec.name if self.store_by_child else edge.parent.name
if key in self.edges:
lst = self.edges[key]
lst.append(edge)
lst.sort(key=_sort_by_dep_types)
else:
self.edges[key] = [edge]
def __str__(self) -> str:
return f"{{deps: {', '.join(str(d) for d in sorted(self.values()))}}}"
def select(
self,
*,
parent: Optional[str] = None,
child: Optional[str] = None,
depflag: dt.DepFlag = dt.ALL,
virtuals: Optional[Union[str, Sequence[str]]] = None,
) -> List[DependencySpec]:
"""Selects a list of edges and returns them.
If an edge:
- Has *any* of the dependency types passed as argument,
- Matches the parent and/or child name
- Provides *any* of the virtuals passed as argument
then it is selected.
The deptypes argument needs to be a flag, since the method won't
convert it for performance reason.
Args:
parent: name of the parent package
child: name of the child package
depflag: allowed dependency types in flag form
virtuals: list of virtuals or specific virtual on the edge
"""
if not depflag:
return []
# Start from all the edges we store
selected = (d for d in itertools.chain.from_iterable(self.values()))
# Filter by parent name
if parent:
selected = (d for d in selected if d.parent.name == parent)
# Filter by child name
if child:
selected = (d for d in selected if d.spec.name == child)
# Filter by allowed dependency types
selected = (dep for dep in selected if not dep.depflag or (depflag & dep.depflag))
# Filter by virtuals
if virtuals is not None:
if isinstance(virtuals, str):
selected = (dep for dep in selected if virtuals in dep.virtuals)
else:
selected = (dep for dep in selected if any(v in dep.virtuals for v in virtuals))
return list(selected)
def clear(self):
self.edges.clear()
def _headers_default_handler(spec: "Spec"):
"""Default handler when looking for the 'headers' attribute.
Tries to search for ``*.h`` files recursively starting from
``spec.package.home.include``.
Parameters:
spec: spec that is being queried
Returns:
HeaderList: The headers in ``prefix.include``
Raises:
NoHeadersError: If no headers are found
"""
home = getattr(spec.package, "home")
headers = fs.find_headers("*", root=home.include, recursive=True)
if headers:
return headers
raise spack.error.NoHeadersError(f"Unable to locate {spec.name} headers in {home}")
def _libs_default_handler(spec: "Spec"):
"""Default handler when looking for the 'libs' attribute.
Tries to search for ``lib{spec.name}`` recursively starting from
``spec.package.home``. If ``spec.name`` starts with ``lib``, searches for
``{spec.name}`` instead.
Parameters:
spec: spec that is being queried
Returns:
LibraryList: The libraries found
Raises:
NoLibrariesError: If no libraries are found
"""
# Variable 'name' is passed to function 'find_libraries', which supports
# glob characters. For example, we have a package with a name 'abc-abc'.
# Now, we don't know if the original name of the package is 'abc_abc'
# (and it generates a library 'libabc_abc.so') or 'abc-abc' (and it
# generates a library 'libabc-abc.so'). So, we tell the function
# 'find_libraries' to give us anything that matches 'libabc?abc' and it
# gives us either 'libabc-abc.so' or 'libabc_abc.so' (or an error)
# depending on which one exists (there is a possibility, of course, to
# get something like 'libabcXabc.so, but for now we consider this
# unlikely).
name = spec.name.replace("-", "?")
home = getattr(spec.package, "home")
# Avoid double 'lib' for packages whose names already start with lib
if not name.startswith("lib") and not spec.satisfies("platform=windows"):
name = "lib" + name
# If '+shared' search only for shared library; if '~shared' search only for
# static library; otherwise, first search for shared and then for static.
search_shared = (
[True] if ("+shared" in spec) else ([False] if ("~shared" in spec) else [True, False])
)
for shared in search_shared:
# Since we are searching for link libraries, on Windows search only for
# ".Lib" extensions by default as those represent import libraries for implicit links.
libs = fs.find_libraries(name, home, shared=shared, recursive=True, runtime=False)
if libs:
return libs
raise spack.error.NoLibrariesError(
f"Unable to recursively locate {spec.name} libraries in {home}"
)
| _EdgeMap |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/simple.py | {
"start": 2204,
"end": 4368
} | class ____(BaseLlamaDataset[LLM]):
_example_type = LabelledSimpleDataExample
def _construct_prediction_dataset( # type: ignore
self, predictions: Sequence[SimpleExamplePrediction]
) -> SimplePredictionDataset:
"""
Construct the specific prediction dataset.
Args:
predictions (List[BaseLlamaExamplePrediction]): the list of predictions.
Returns:
BaseLlamaPredictionDataset: A dataset of predictions.
"""
return SimplePredictionDataset(predictions=predictions)
def to_pandas(self) -> Any:
"""Create pandas dataframe."""
try:
import pandas as pd
except ImportError:
raise ImportError(
"pandas is required for this function. Please install it with `pip install pandas`."
)
data: Dict[str, List[str]] = {
"reference_label": [],
"text": [],
"text_by": [],
}
for example in self.examples:
if not isinstance(example, self._example_type):
raise ValueError(
f"Expected example of type {LabelledSimpleDataExample}, got {type(example)}"
)
data["reference_label"].append(example.reference_label)
data["text"].append(example.text)
data["text_by"].append(str(example.text_by))
return pd.DataFrame(data)
async def _apredict_example(
self,
predictor: LLM,
example: BaseLlamaDataExample,
sleep_time_in_seconds: int,
) -> BaseLlamaExamplePrediction:
"""Async predict RAG example with a query engine."""
raise NotImplementedError("This method has not yet been implemented.")
def _predict_example(
self,
predictor: LLM,
example: BaseLlamaDataExample,
sleep_time_in_seconds: int = 0,
) -> BaseLlamaExamplePrediction:
raise NotImplementedError("This method has not yet been implemented.")
@property
def class_name(self) -> str:
"""Data example class name."""
return "LabelledSimpleDataset"
| LabelledSimpleDataset |
python | django__django | django/urls/resolvers.py | {
"start": 5199,
"end": 6366
} | class ____:
def describe(self):
"""
Format the URL pattern for display in warning messages.
"""
description = "'{}'".format(self)
if self.name:
description += " [name='{}']".format(self.name)
return description
def _check_pattern_startswith_slash(self):
"""
Check that the pattern does not begin with a forward slash.
"""
if not settings.APPEND_SLASH:
# Skip check as it can be useful to start a URL pattern with a
# slash when APPEND_SLASH=False.
return []
if self._regex.startswith(("/", "^/", "^\\/")) and not self._regex.endswith(
"/"
):
warning = Warning(
"Your URL pattern {} has a route beginning with a '/'. Remove this "
"slash as it is unnecessary. If this pattern is targeted in an "
"include(), ensure the include() pattern has a trailing '/'.".format(
self.describe()
),
id="urls.W002",
)
return [warning]
else:
return []
| CheckURLMixin |
python | doocs__leetcode | solution/0800-0899/0813.Largest Sum of Averages/Solution.py | {
"start": 0,
"end": 508
} | class ____:
def largestSumOfAverages(self, nums: List[int], k: int) -> float:
@cache
def dfs(i: int, k: int) -> float:
if i == n:
return 0
if k == 1:
return (s[n] - s[i]) / (n - i)
ans = 0
for j in range(i + 1, n):
ans = max(ans, (s[j] - s[i]) / (j - i) + dfs(j, k - 1))
return ans
n = len(nums)
s = list(accumulate(nums, initial=0))
return dfs(0, k)
| Solution |
python | google__pytype | pytype/tools/analyze_project/pytype_runner.py | {
"start": 5495,
"end": 14901
} | class ____:
"""Runs pytype over an import graph."""
def __init__(self, conf, sorted_sources):
self.filenames = set(conf.inputs) # files to type-check
# all source modules as a sequence of (module, direct_deps)
self.sorted_sources = sorted_sources
self.python_version = conf.python_version
self.platform = conf.platform
self.pyi_dir = path_utils.join(conf.output, 'pyi')
self.imports_dir = path_utils.join(conf.output, 'imports')
self.ninja_file = path_utils.join(conf.output, 'build.ninja')
self.custom_options = [
(k, getattr(conf, k)) for k in set(conf.__slots__) - set(config.ITEMS)]
self.keep_going = conf.keep_going
self.jobs = conf.jobs
def set_custom_options(self, flags_with_values, binary_flags, report_errors):
"""Merge self.custom_options into flags_with_values and binary_flags."""
for dest, value in self.custom_options:
if not report_errors and dest in config.REPORT_ERRORS_ITEMS:
continue
arg_info = config.get_pytype_single_item(dest).arg_info
assert arg_info is not None
if arg_info.to_command_line:
value = arg_info.to_command_line(value)
if isinstance(value, bool):
if value:
binary_flags.add(arg_info.flag)
else:
binary_flags.discard(arg_info.flag)
elif value:
flags_with_values[arg_info.flag] = str(value)
def get_pytype_command_for_ninja(self, report_errors):
"""Get the command line for running pytype."""
exe = PYTYPE_SINGLE
flags_with_values = {
'--imports_info': '$imports',
'-V': self.python_version,
'-o': '$out',
'--module-name': '$module',
'--platform': self.platform,
}
binary_flags = {
'--quick',
'--analyze-annotated' if report_errors else '--no-report-errors',
'--nofail',
}
self.set_custom_options(flags_with_values, binary_flags, report_errors)
# Order the flags so that ninja recognizes commands across runs.
return (
exe +
list(sum(sorted(flags_with_values.items()), ())) +
sorted(binary_flags) +
['$in']
)
def make_imports_dir(self):
try:
file_utils.makedirs(self.imports_dir)
except OSError:
logging.error('Could not create imports directory: %s', self.imports_dir)
return False
return True
def write_default_pyi(self):
"""Write a default pyi file."""
output = path_utils.join(self.imports_dir, 'default.pyi')
with open(output, 'w') as f:
f.write(DEFAULT_PYI)
return output
def write_imports(self, module_name, imports_map, suffix):
"""Write a .imports file."""
output = path_utils.join(self.imports_dir,
module_name + '.imports' + suffix)
with open(output, 'w') as f:
for item in imports_map.items():
f.write('%s %s\n' % item)
return output
def get_module_action(self, module):
"""Get the action for the given module.
Args:
module: A module_utils.Module object.
Returns:
An Action object, or None for a non-Python file.
"""
f = module.full_path
# Report errors for files we are analysing directly.
if f in self.filenames:
action = Action.CHECK
report = logging.warning
else:
action = Action.INFER
report = logging.info
# For builtin and system files not in pytype's own pytype_extensions
# library, do not attempt to generate a pyi.
if (not module.name.startswith('pytype_extensions.') and
module.kind in ('Builtin', 'System')):
action = Action.GENERATE_DEFAULT
report('%s: %s module %s', action, module.kind, module.name)
return action
def yield_sorted_modules(
self,
) -> Iterable[
tuple[module_utils.Module, str, Sequence[module_utils.Module], str]
]:
"""Yield modules from our sorted source files."""
for group, deps in self.sorted_sources:
modules = []
for module in group:
action = self.get_module_action(module)
if action:
modules.append((module, action))
if len(modules) == 1:
yield modules[0] + (deps, Stage.SINGLE_PASS)
else:
# If we have a cycle we run pytype over the files twice. So that we
# don't fail on missing dependencies, we'll ignore errors the first
# time and add the cycle itself to the dependencies the second time.
second_pass_deps = []
for module, action in modules:
second_pass_deps.append(module)
if action == Action.CHECK:
action = Action.INFER
yield module, action, deps, Stage.FIRST_PASS
deps += tuple(second_pass_deps)
for module, action in modules:
# We don't need to run generate_default twice
if action != Action.GENERATE_DEFAULT:
yield module, action, deps, Stage.SECOND_PASS
def write_ninja_preamble(self):
"""Write out the pytype-single commands that the build will call."""
with open(self.ninja_file, 'w') as f:
for action, report_errors in ((Action.INFER, False),
(Action.CHECK, True)):
command = ' '.join(
self.get_pytype_command_for_ninja(report_errors=report_errors))
logging.info('%s command: %s', action, command)
f.write(
'rule {action}\n'
' command = {command}\n'
' description = {action} $module\n'.format(
action=action, command=command)
)
def write_build_statement(self, module, action, deps, imports, suffix):
"""Write a build statement for the given module.
Args:
module: A module_utils.Module object.
action: An Action object.
deps: The module's dependencies.
imports: An imports file.
suffix: An output file suffix.
Returns:
The expected output of the build statement.
"""
output = path_utils.join(self.pyi_dir,
_module_to_output_path(module) + '.pyi' + suffix)
logging.info('%s %s\n imports: %s\n deps: %s\n output: %s',
action, module.name, imports, deps, output)
if deps:
deps = ' | ' + ' '.join(escape_ninja_path(dep) for dep in deps)
else:
deps = ''
with open(self.ninja_file, 'a') as f:
f.write('build {output}: {action} {input}{deps}\n'
' imports = {imports}\n'
' module = {module}\n'.format(
output=escape_ninja_path(output),
action=action,
input=escape_ninja_path(module.full_path),
deps=deps,
imports=escape_ninja_path(imports),
module=module.name))
return output
def setup_build(self):
"""Write out the full build.ninja file.
Returns:
All files with build statements.
"""
if not self.make_imports_dir():
return set()
default_output = self.write_default_pyi()
self.write_ninja_preamble()
files = set()
module_to_imports_map = {}
module_to_output = {}
for module, action, deps, stage in self.yield_sorted_modules():
if files >= self.filenames:
logging.info('skipped: %s %s (%s)', action, module.name, stage)
continue
if action == Action.GENERATE_DEFAULT:
module_to_output[module] = default_output
continue
if stage == Stage.SINGLE_PASS:
files.add(module.full_path)
suffix = ''
elif stage == Stage.FIRST_PASS:
suffix = FIRST_PASS_SUFFIX
else:
assert stage == Stage.SECOND_PASS
files.add(module.full_path)
suffix = ''
imports_map = module_to_imports_map[module] = get_imports_map(
deps, module_to_imports_map, module_to_output)
imports = self.write_imports(module.name, imports_map, suffix)
# Don't depend on default.pyi, since it's regenerated every time.
deps = tuple(module_to_output[m] for m in deps
if module_to_output[m] != default_output)
module_to_output[module] = self.write_build_statement(
module, action, deps, imports, suffix)
return files
def build(self):
"""Execute the build.ninja file."""
# -k N keep going until N jobs fail (0 means infinity)
# -C DIR change to DIR before doing anything else
# -j N run N jobs in parallel (0 means infinity)
# -v show all command lines while building
k = '0' if self.keep_going else '1'
# relpath() prevents possibly sensitive directory info from appearing in
# ninja's "Entering directory" message.
c = path_utils.relpath(path_utils.dirname(self.ninja_file))
command = _get_executable('ninja') + [
'-k', k, '-C', c, '-j', str(self.jobs)]
if logging.getLogger().isEnabledFor(logging.INFO):
command.append('-v')
ret = subprocess.call(command)
print(f'Leaving directory {c!r}')
return ret
def run(self):
"""Run pytype over the project."""
logging.info('------------- Starting pytype run. -------------')
files_to_analyze = self.setup_build()
num_sources = len(self.filenames & files_to_analyze)
print('Analyzing %d sources with %d local dependencies' %
(num_sources, len(files_to_analyze) - num_sources))
ret = self.build()
if not ret:
print('Success: no errors found')
return ret
| PytypeRunner |
python | walkccc__LeetCode | solutions/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/1379.py | {
"start": 0,
"end": 487
} | class ____:
def getTargetCopy(
self,
original: TreeNode,
cloned: TreeNode,
target: TreeNode,
) -> TreeNode:
ans = None
def dfs(original: TreeNode, cloned: TreeNode) -> None:
nonlocal ans
if ans:
return
if not original:
return
if original == target:
ans = cloned
return
dfs(original.left, cloned.left)
dfs(original.right, cloned.right)
dfs(original, cloned)
return ans
| Solution |
python | pytorch__pytorch | torchgen/model.py | {
"start": 10257,
"end": 12538
} | class ____(Enum):
Byte = auto()
Char = auto()
Short = auto()
Int = auto()
Long = auto()
Half = auto()
Float = auto()
Double = auto()
ComplexHalf = auto()
ComplexFloat = auto()
ComplexDouble = auto()
Bool = auto()
BFloat16 = auto()
Float8_e5m2 = auto()
Float8_e5m2fnuz = auto()
Float8_e4m3fn = auto()
Float8_e4m3fnuz = auto()
Float8_e8m0fnu = auto()
def __str__(self) -> str:
return self.name
@staticmethod
def maybe_parse(value: str) -> ScalarType | None:
for k, v in ScalarType.__members__.items():
if k == value:
return v
return None
@staticmethod
def parse(value: str) -> ScalarType:
mb_r = ScalarType.maybe_parse(value)
assert mb_r is not None, f"unknown dtype {value}"
return mb_r
@staticmethod
def parse_set(values: str) -> OrderedSet[ScalarType]:
dtypes: OrderedSet[ScalarType] = OrderedSet()
for value in values.split(", "):
if value in DTYPE_CLASSES:
dtypes.update(DTYPE_CLASSES[value])
else:
dtypes.add(ScalarType.parse(value))
return dtypes
DTYPE_CLASSES: dict[str, OrderedSet[ScalarType]] = {}
# NB: Integral doesn't include boolean
DTYPE_CLASSES["Integral"] = OrderedSet(
[
ScalarType.Byte,
ScalarType.Char,
ScalarType.Int,
ScalarType.Long,
ScalarType.Short,
]
)
# NB: Floating doesn't include low precision types
DTYPE_CLASSES["Floating"] = OrderedSet([ScalarType.Float, ScalarType.Double])
DTYPE_CLASSES["Complex"] = OrderedSet(
[ScalarType.ComplexFloat, ScalarType.ComplexDouble]
)
DTYPE_CLASSES["All"] = DTYPE_CLASSES["Integral"] | DTYPE_CLASSES["Floating"]
DTYPE_CLASSES["AllAndComplex"] = DTYPE_CLASSES["All"] | DTYPE_CLASSES["Complex"]
DTYPE_CLASSES["FloatingAndComplex"] = (
DTYPE_CLASSES["Floating"] | DTYPE_CLASSES["Complex"]
)
# Represents the valid entries for ufunc_inner_loop in native_functions.yaml.
# NB: if you add a new UfuncKey, you will teach torchgen.dest.ufunc how
# to process it. Most logic will ignore keys they don't understand, so your
# new key will get silently ignored until you hook in logic to deal with it.
| ScalarType |
python | sympy__sympy | sympy/matrices/expressions/fourier.py | {
"start": 273,
"end": 1478
} | class ____(MatrixExpr):
r"""
Returns a discrete Fourier transform matrix. The matrix is scaled
with :math:`\frac{1}{\sqrt{n}}` so that it is unitary.
Parameters
==========
n : integer or Symbol
Size of the transform.
Examples
========
>>> from sympy.abc import n
>>> from sympy.matrices.expressions.fourier import DFT
>>> DFT(3)
DFT(3)
>>> DFT(3).as_explicit()
Matrix([
[sqrt(3)/3, sqrt(3)/3, sqrt(3)/3],
[sqrt(3)/3, sqrt(3)*exp(-2*I*pi/3)/3, sqrt(3)*exp(2*I*pi/3)/3],
[sqrt(3)/3, sqrt(3)*exp(2*I*pi/3)/3, sqrt(3)*exp(-2*I*pi/3)/3]])
>>> DFT(n).shape
(n, n)
References
==========
.. [1] https://en.wikipedia.org/wiki/DFT_matrix
"""
def __new__(cls, n):
n = _sympify(n)
cls._check_dim(n)
obj = super().__new__(cls, n)
return obj
n = property(lambda self: self.args[0]) # type: ignore
shape = property(lambda self: (self.n, self.n)) # type: ignore
def _entry(self, i, j, **kwargs):
w = exp(-2*S.Pi*I/self.n)
return w**(i*j) / sqrt(self.n)
def _eval_inverse(self):
return IDFT(self.n)
| DFT |
python | huggingface__transformers | src/transformers/models/bit/modeling_bit.py | {
"start": 11084,
"end": 13432
} | class ____(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(
self,
config,
in_channels,
out_channels=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
drop_path_rate=0.0,
is_first_layer=False,
):
super().__init__()
first_dilation = first_dilation or dilation
out_channels = out_channels or in_channels
mid_channels = make_div(out_channels * bottle_ratio)
if is_first_layer:
self.downsample = BitDownsampleConv(
config,
in_channels,
out_channels,
stride=stride,
preact=True,
)
else:
self.downsample = None
self.norm1 = BitGroupNormActivation(config, in_channels)
self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
self.conv2 = WeightStandardizedConv2d(
mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
)
self.norm3 = BitGroupNormActivation(config, mid_channels)
self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def forward(self, hidden_states):
hidden_states_preact = self.norm1(hidden_states)
# shortcut branch
shortcut = hidden_states
if self.downsample is not None:
shortcut = self.downsample(hidden_states_preact)
# residual branch
hidden_states = self.conv1(hidden_states_preact)
hidden_states = self.conv2(self.norm2(hidden_states))
hidden_states = self.conv3(self.norm3(hidden_states))
hidden_states = self.drop_path(hidden_states)
return hidden_states + shortcut
| BitPreActivationBottleneckLayer |
python | kamyu104__LeetCode-Solutions | Python/count-integers-with-even-digit-sum.py | {
"start": 39,
"end": 396
} | class ____(object):
def countEven(self, num):
"""
:type num: int
:rtype: int
"""
def parity(x):
result = 0
while x:
result += x%10
x //= 10
return result%2
return (num-parity(num))//2
# Time: O(nlogn)
# Space: O(1)
# brute force
| Solution |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/sequential.py | {
"start": 4506,
"end": 7535
} | class ____(Chain):
"""Simple chain where the outputs of one step feed directly into next."""
chains: list[Chain]
strip_outputs: bool = False
input_key: str = "input"
output_key: str = "output"
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Expect input key."""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key."""
return [self.output_key]
@model_validator(mode="after")
def validate_chains(self) -> Self:
"""Validate that chains are all single input/output."""
for chain in self.chains:
if len(chain.input_keys) != 1:
msg = (
"Chains used in SimplePipeline should all have one input, got "
f"{chain} with {len(chain.input_keys)} inputs."
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Chains used in SimplePipeline should all have one output, got "
f"{chain} with {len(chain.output_keys)} outputs."
)
raise ValueError(msg)
return self
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = chain.run(
_input,
callbacks=_run_manager.get_child(f"step_{i + 1}"),
)
if self.strip_outputs:
_input = _input.strip()
_run_manager.on_text(
_input,
color=color_mapping[str(i)],
end="\n",
verbose=self.verbose,
)
return {self.output_key: _input}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = await chain.arun(
_input,
callbacks=_run_manager.get_child(f"step_{i + 1}"),
)
if self.strip_outputs:
_input = _input.strip()
await _run_manager.on_text(
_input,
color=color_mapping[str(i)],
end="\n",
verbose=self.verbose,
)
return {self.output_key: _input}
| SimpleSequentialChain |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/chat.py | {
"start": 11338,
"end": 22090
} | class ____(BaseMessagePromptTemplate):
"""Human message prompt template. This is a message sent from the user."""
prompt: (
StringPromptTemplate
| list[StringPromptTemplate | ImagePromptTemplate | DictPromptTemplate]
)
"""Prompt template."""
additional_kwargs: dict = Field(default_factory=dict)
"""Additional keyword arguments to pass to the prompt template."""
_msg_class: type[BaseMessage]
@classmethod
def from_template(
cls: type[Self],
template: str
| list[str | _TextTemplateParam | _ImageTemplateParam | dict[str, Any]],
template_format: PromptTemplateFormat = "f-string",
*,
partial_variables: dict[str, Any] | None = None,
**kwargs: Any,
) -> Self:
"""Create a class from a string template.
Args:
template: a template.
template_format: format of the template.
Options are: 'f-string', 'mustache', 'jinja2'.
partial_variables: A dictionary of variables that can be used too partially.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
Raises:
ValueError: If the template is not a string or list of strings.
"""
if isinstance(template, str):
prompt: StringPromptTemplate | list = PromptTemplate.from_template(
template,
template_format=template_format,
partial_variables=partial_variables,
)
return cls(prompt=prompt, **kwargs)
if isinstance(template, list):
if (partial_variables is not None) and len(partial_variables) > 0:
msg = "Partial variables are not supported for list of templates."
raise ValueError(msg)
prompt = []
for tmpl in template:
if isinstance(tmpl, str) or (
isinstance(tmpl, dict)
and "text" in tmpl
and set(tmpl.keys()) <= {"type", "text"}
):
if isinstance(tmpl, str):
text: str = tmpl
else:
text = cast("_TextTemplateParam", tmpl)["text"] # type: ignore[assignment]
prompt.append(
PromptTemplate.from_template(
text, template_format=template_format
)
)
elif (
isinstance(tmpl, dict)
and "image_url" in tmpl
and set(tmpl.keys())
<= {
"type",
"image_url",
}
):
img_template = cast("_ImageTemplateParam", tmpl)["image_url"]
input_variables = []
if isinstance(img_template, str):
variables = get_template_variables(
img_template, template_format
)
if variables:
if len(variables) > 1:
msg = (
"Only one format variable allowed per image"
f" template.\nGot: {variables}"
f"\nFrom: {tmpl}"
)
raise ValueError(msg)
input_variables = [variables[0]]
img_template = {"url": img_template}
img_template_obj = ImagePromptTemplate(
input_variables=input_variables,
template=img_template,
template_format=template_format,
)
elif isinstance(img_template, dict):
img_template = dict(img_template)
for key in ["url", "path", "detail"]:
if key in img_template:
input_variables.extend(
get_template_variables(
img_template[key], template_format
)
)
img_template_obj = ImagePromptTemplate(
input_variables=input_variables,
template=img_template,
template_format=template_format,
)
else:
msg = f"Invalid image template: {tmpl}"
raise ValueError(msg)
prompt.append(img_template_obj)
elif isinstance(tmpl, dict):
if template_format == "jinja2":
msg = (
"jinja2 is unsafe and is not supported for templates "
"expressed as dicts. Please use 'f-string' or 'mustache' "
"format."
)
raise ValueError(msg)
data_template_obj = DictPromptTemplate(
template=cast("dict[str, Any]", tmpl),
template_format=template_format,
)
prompt.append(data_template_obj)
else:
msg = f"Invalid template: {tmpl}"
raise ValueError(msg)
return cls(prompt=prompt, **kwargs)
msg = f"Invalid template: {template}"
raise ValueError(msg)
@classmethod
def from_template_file(
cls: type[Self],
template_file: str | Path,
input_variables: list[str],
**kwargs: Any,
) -> Self:
"""Create a class from a template file.
Args:
template_file: path to a template file. String or Path.
input_variables: list of input variables.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
template = Path(template_file).read_text(encoding="utf-8")
return cls.from_template(template, input_variables=input_variables, **kwargs)
def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
"""Format messages from kwargs.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
List of BaseMessages.
"""
return [self.format(**kwargs)]
async def aformat_messages(self, **kwargs: Any) -> list[BaseMessage]:
"""Async format messages from kwargs.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
List of BaseMessages.
"""
return [await self.aformat(**kwargs)]
@property
def input_variables(self) -> list[str]:
"""Input variables for this prompt template.
Returns:
List of input variable names.
"""
prompts = self.prompt if isinstance(self.prompt, list) else [self.prompt]
return [iv for prompt in prompts for iv in prompt.input_variables]
def format(self, **kwargs: Any) -> BaseMessage:
"""Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
if isinstance(self.prompt, StringPromptTemplate):
text = self.prompt.format(**kwargs)
return self._msg_class(
content=text, additional_kwargs=self.additional_kwargs
)
content: list = []
for prompt in self.prompt:
inputs = {var: kwargs[var] for var in prompt.input_variables}
if isinstance(prompt, StringPromptTemplate):
formatted_text: str = prompt.format(**inputs)
if formatted_text != "":
content.append({"type": "text", "text": formatted_text})
elif isinstance(prompt, ImagePromptTemplate):
formatted_image: ImageURL = prompt.format(**inputs)
content.append({"type": "image_url", "image_url": formatted_image})
elif isinstance(prompt, DictPromptTemplate):
formatted_dict: dict[str, Any] = prompt.format(**inputs)
content.append(formatted_dict)
return self._msg_class(
content=content, additional_kwargs=self.additional_kwargs
)
async def aformat(self, **kwargs: Any) -> BaseMessage:
"""Async format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
if isinstance(self.prompt, StringPromptTemplate):
text = await self.prompt.aformat(**kwargs)
return self._msg_class(
content=text, additional_kwargs=self.additional_kwargs
)
content: list = []
for prompt in self.prompt:
inputs = {var: kwargs[var] for var in prompt.input_variables}
if isinstance(prompt, StringPromptTemplate):
formatted_text: str = await prompt.aformat(**inputs)
if formatted_text != "":
content.append({"type": "text", "text": formatted_text})
elif isinstance(prompt, ImagePromptTemplate):
formatted_image: ImageURL = await prompt.aformat(**inputs)
content.append({"type": "image_url", "image_url": formatted_image})
elif isinstance(prompt, DictPromptTemplate):
formatted_dict: dict[str, Any] = prompt.format(**inputs)
content.append(formatted_dict)
return self._msg_class(
content=content, additional_kwargs=self.additional_kwargs
)
@override
def pretty_repr(self, html: bool = False) -> str:
"""Human-readable representation.
Args:
html: Whether to format as HTML.
Returns:
Human-readable representation.
"""
# TODO: Handle partials
title = self.__class__.__name__.replace("MessagePromptTemplate", " Message")
title = get_msg_title_repr(title, bold=html)
prompts = self.prompt if isinstance(self.prompt, list) else [self.prompt]
prompt_reprs = "\n\n".join(prompt.pretty_repr(html=html) for prompt in prompts)
return f"{title}\n\n{prompt_reprs}"
| _StringImageMessagePromptTemplate |
python | pytorch__pytorch | torch/_export/db/examples/type_reflection_method.py | {
"start": 111,
"end": 461
} | class ____(torch.nn.Module):
"""
type() calls on custom objects followed by attribute accesses are not allowed
due to its overly dynamic nature.
"""
def forward(self, x):
a = A()
return type(a).func(x)
example_args = (torch.randn(3, 4),)
tags = {"python.builtin"}
model = TypeReflectionMethod()
| TypeReflectionMethod |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 3627,
"end": 3765
} | class ____[A, B, C](meta=Aaaaaaaaaaaaaaaaaaaaaa):
pass
# Regression test for: https://github.com/astral-sh/ruff/pull/7001
| TestTypeParams |
python | eventlet__eventlet | eventlet/green/http/client.py | {
"start": 57530,
"end": 57662
} | class ____(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
| UnknownProtocol |
python | ansible__ansible | test/integration/targets/ansible-test-container/runme.py | {
"start": 23646,
"end": 24873
} | class ____:
user_scenario: UserScenario
engine: str
container_name: str
image: str
disable_selinux: bool
expose_cgroup_version: int | None
enable_sha1: bool
debug_systemd: bool
probe_cgroups: bool
disable_apparmor_profile_unix_chkpwd: bool
@property
def tags(self) -> tuple[str, ...]:
tags = []
if self.user_scenario.ssh:
tags.append(f'ssh: {self.user_scenario.ssh.name}')
if self.user_scenario.remote:
tags.append(f'remote: {self.user_scenario.remote.name}')
if self.disable_selinux:
tags.append('selinux: permissive')
if self.expose_cgroup_version is not None:
tags.append(f'cgroup: {self.expose_cgroup_version}')
if self.enable_sha1:
tags.append('sha1: enabled')
if self.disable_apparmor_profile_unix_chkpwd:
tags.append('apparmor(unix-chkpwd): disabled')
return tuple(tags)
@property
def tag_label(self) -> str:
return ' '.join(f'[{tag}]' for tag in self.tags)
def __str__(self):
return f'[{self.container_name}] ({self.engine}) {self.tag_label}'.strip()
@dataclasses.dataclass(frozen=True)
| TestScenario |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 616749,
"end": 617084
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(SecurityVulnerability, graphql_name="node")
| SecurityVulnerabilityEdge |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 944988,
"end": 945740
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for RepositoryTopic."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("RepositoryTopicEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("RepositoryTopic"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| RepositoryTopicConnection |
python | kennethreitz__tablib | src/tablib/packages/dbfpy/fields.py | {
"start": 11916,
"end": 14538
} | class ____(DbfFieldDef):
"""Definition of the timestamp field."""
# a difference between JDN (Julian Day Number)
# and GDN (Gregorian Day Number). note, that GDN < JDN
JDN_GDN_DIFF = 1721425
typeCode = "T"
defaultValue = utils.classproperty(lambda cls: datetime.datetime.now())
# two 32-bits integers representing JDN and amount of
# milliseconds respectively gives us 8 bytes.
# note, that values must be encoded in LE byteorder.
length = 8
def decodeValue(self, value):
"""Return a `datetime.datetime` instance."""
assert len(value) == self.length
# LE byteorder
_jdn, _msecs = struct.unpack("<2I", value)
if _jdn >= 1:
_rv = datetime.datetime.fromordinal(_jdn - self.JDN_GDN_DIFF)
_rv += datetime.timedelta(0, _msecs / 1000.0)
else:
# empty date
_rv = None
return _rv
def encodeValue(self, value):
"""Return a string-encoded ``value``."""
if value:
value = utils.getDateTime(value)
# LE byteorder
_rv = struct.pack("<2I", value.toordinal() + self.JDN_GDN_DIFF,
(value.hour * 3600 + value.minute * 60 + value.second) * 1000)
else:
_rv = "\0" * self.length
assert len(_rv) == self.length
return _rv
_fieldsRegistry = {}
def registerField(fieldCls):
"""Register field definition class.
``fieldCls`` should be subclass of the `DbfFieldDef`.
Use `lookupFor` to retrieve field definition class
by the type code.
"""
assert fieldCls.typeCode is not None, "Type code isn't defined"
# XXX: use fieldCls.typeCode.upper()? in case of any decign
# don't forget to look to the same comment in ``lookupFor`` method
_fieldsRegistry[fieldCls.typeCode] = fieldCls
def lookupFor(typeCode):
"""Return field definition class for the given type code.
``typeCode`` must be a single character. That type should be
previously registered.
Use `registerField` to register new field class.
Return:
Return value is a subclass of the `DbfFieldDef`.
"""
# XXX: use typeCode.upper()? in case of any decign don't
# forget to look to the same comment in ``registerField``
return _fieldsRegistry[chr(typeCode)]
# register generic types
for (_name, _val) in list(globals().items()):
if isinstance(_val, type) and issubclass(_val, DbfFieldDef) \
and (_name != "DbfFieldDef"):
__all__.append(_name)
registerField(_val)
del _name, _val
# vim: et sts=4 sw=4 :
| DbfDateTimeFieldDef |
python | pandas-dev__pandas | pandas/tests/extension/decimal/test_decimal.py | {
"start": 1285,
"end": 8316
} | class ____(base.ExtensionTests):
def _get_expected_exception(
self, op_name: str, obj, other
) -> type[Exception] | tuple[type[Exception], ...] | None:
return None
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
if op_name in ["kurt", "sem"]:
return False
return True
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
if op_name == "count":
return super().check_reduce(ser, op_name, skipna)
else:
result = getattr(ser, op_name)(skipna=skipna)
expected = getattr(np.asarray(ser), op_name)()
tm.assert_almost_equal(result, expected)
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
if all_numeric_reductions in ["kurt", "skew", "sem", "median"]:
mark = pytest.mark.xfail(raises=NotImplementedError)
request.applymarker(mark)
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
op_name = all_numeric_reductions
if op_name in ["skew", "median"]:
mark = pytest.mark.xfail(raises=NotImplementedError)
request.applymarker(mark)
return super().test_reduce_frame(data, all_numeric_reductions, skipna)
def test_compare_scalar(self, data, comparison_op):
ser = pd.Series(data)
self._compare_other(ser, data, comparison_op, 0.5)
def test_compare_array(self, data, comparison_op):
ser = pd.Series(data)
alter = np.random.default_rng(2).choice([-1, 0, 1], len(data))
# Randomly double, halve or keep same value
other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]
self._compare_other(ser, data, comparison_op, other)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
op_name = all_arithmetic_operators
ser = pd.Series(data)
context = decimal.getcontext()
divbyzerotrap = context.traps[decimal.DivisionByZero]
invalidoptrap = context.traps[decimal.InvalidOperation]
context.traps[decimal.DivisionByZero] = 0
context.traps[decimal.InvalidOperation] = 0
# Decimal supports ops with int, but not float
other = pd.Series([int(d * 100) for d in data])
self.check_opname(ser, op_name, other)
if "mod" not in op_name:
self.check_opname(ser, op_name, ser * 2)
self.check_opname(ser, op_name, 0)
self.check_opname(ser, op_name, 5)
context.traps[decimal.DivisionByZero] = divbyzerotrap
context.traps[decimal.InvalidOperation] = invalidoptrap
def test_fillna_frame(self, data_missing):
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
Pandas4Warning, match=msg, check_stacklevel=False
):
super().test_fillna_frame(data_missing)
def test_fillna_series(self, data_missing):
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
DeprecationWarning, match=msg, check_stacklevel=False
):
super().test_fillna_series(data_missing)
def test_fillna_with_none(self, data_missing):
# GH#57723
# EAs that don't have special logic for None will raise, unlike pandas'
# which interpret None as the NA value for the dtype.
msg = "conversion from NoneType to Decimal is not supported"
with pytest.raises(TypeError, match=msg):
super().test_fillna_with_none(data_missing)
def test_fillna_limit_frame(self, data_missing):
# GH#58001
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
DeprecationWarning, match=msg, check_stacklevel=False
):
super().test_fillna_limit_frame(data_missing)
def test_fillna_limit_series(self, data_missing):
# GH#58001
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
DeprecationWarning, match=msg, check_stacklevel=False
):
super().test_fillna_limit_series(data_missing)
@pytest.mark.xfail(reason="copy keyword is missing")
def test_fillna_readonly(self, data_missing):
super().test_fillna_readonly(data_missing)
def test_series_repr(self, data):
# Overriding this base test to explicitly test that
# the custom _formatter is used
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
assert "Decimal: " in repr(ser)
@pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")
@pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
super().test_unary_ufunc_dunder_equivalence(data, ufunc)
def test_take_na_value_other_decimal():
arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])
tm.assert_extension_array_equal(result, expected)
def test_series_constructor_coerce_data_to_extension_dtype():
dtype = DecimalDtype()
ser = pd.Series([0, 1, 2], dtype=dtype)
arr = DecimalArray(
[decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],
dtype=dtype,
)
exp = pd.Series(arr)
tm.assert_series_equal(ser, exp)
def test_series_constructor_with_dtype():
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.Series(arr, dtype=DecimalDtype())
expected = pd.Series(arr)
tm.assert_series_equal(result, expected)
result = pd.Series(arr, dtype="int64")
expected = pd.Series([10])
tm.assert_series_equal(result, expected)
def test_dataframe_constructor_with_dtype():
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())
expected = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, expected)
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.DataFrame({"A": arr}, dtype="int64")
expected = pd.DataFrame({"A": [10]})
tm.assert_frame_equal(result, expected)
def test_astype_dispatches(frame_or_series):
# This is a dtype-specific test that ensures Series[decimal].astype
# gets all the way through to ExtensionArray.astype
# Designing a reliable smoke test that works for arbitrary data types
# is difficult.
data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")
ctx = decimal.Context()
ctx.prec = 5
data = frame_or_series(data)
result = data.astype(DecimalDtype(ctx))
if frame_or_series is pd.DataFrame:
result = result["a"]
assert result.dtype.context.prec == ctx.prec
| TestDecimalArray |
python | pytorch__pytorch | torch/utils/mkldnn.py | {
"start": 2990,
"end": 3696
} | class ____(_MkldnnConvNd):
def __init__(self, dense_module, dtype) -> None:
super().__init__(dense_module)
self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight(
dense_module.weight.to_mkldnn(dtype),
self.padding,
self.stride,
self.dilation,
self.groups))
@torch.jit.script_method
def __setstate__(self, state):
self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight(
state[0].to_mkldnn(),
self.padding,
self.stride,
self.dilation,
self.groups)
self.bias = state[1].to_mkldnn()
self.training = state[2]
| MkldnnConv2d |
python | django__django | tests/admin_changelist/admin.py | {
"start": 3031,
"end": 3150
} | class ____(admin.ModelAdmin):
list_display = ("band", "player")
list_select_related = ("player",)
| InvitationAdmin |
python | skorch-dev__skorch | examples/benchmarks/history.py | {
"start": 514,
"end": 706
} | class ____(Callback):
def on_batch_end(self, net, **kwargs):
try:
net.history[-1, 'batches', -1, 'foobar']
except Exception as e:
pass
| TriggerKeyError |
python | joke2k__faker | faker/providers/bank/ru_RU/__init__.py | {
"start": 42,
"end": 16674
} | class ____(BankProvider):
"""Implement bank provider for ``ru_RU`` locale.
Sources for region codes, currency codes, and bank names:
- https://ru.wikipedia.org/wiki/Коды_субъектов_Российской_Федерации
- https://ru.wikipedia.org/wiki/Общероссийский_классификатор_валют
- http://cbr.ru/credit/coreports/ko17012020.zip
"""
country_code = "RU"
region_codes = (
"01",
"03",
"04",
"05",
"07",
"08",
"10",
"11",
"12",
"14",
"15",
"17",
"18",
"19",
"20",
"22",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"32",
"33",
"34",
"35",
"36",
"37",
"38",
"40",
"41",
"42",
"44",
"45",
"46",
"47",
"49",
"50",
"52",
"53",
"54",
"56",
"57",
"58",
"60",
"61",
"63",
"64",
"65",
"66",
"67",
"68",
"69",
"70",
"71",
"73",
"75",
"76",
"77",
"78",
"79",
"80",
"81",
"82",
"83",
"84",
"85",
"86",
"87",
"88",
"89",
"90",
"91",
"92",
"93",
"94",
"95",
"96",
"97",
"98",
"99",
)
department_code_formats = (
"0#",
"1#",
"2#",
"3#",
"4#",
"5#",
"6#",
"7#",
"8#",
"9#",
)
credit_organization_code_formats = (
"05#",
"06#",
"07#",
"08#",
"09#",
"1##",
"2##",
"3##",
"4##",
"5##",
"6##",
"7##",
"8##",
"9##",
)
checking_account_codes = (
[str(i) for i in range(102, 110)]
+ ["203", "204"]
+ [str(i) for i in range(301, 330)]
+ [str(i) for i in range(401, 409)]
+ [str(i) for i in range(411, 426)]
+ ["430"]
+ [str(i) for i in range(501, 527)]
)
organization_codes = (
"01",
"02",
"03",
"04",
)
currency_codes = (
"008",
"012",
"032",
"036",
"044",
"048",
"050",
"051",
"052",
"060",
"064",
"068",
"072",
"084",
"090",
"096",
"104",
"108",
"116",
"124",
"132",
"136",
"144",
"152",
"156",
"170",
"174",
"188",
"191",
"192",
"203",
"208",
"214",
"222",
"230",
"232",
"238",
"242",
"262",
"270",
"292",
"320",
"324",
"328",
"332",
"340",
"344",
"348",
"352",
"356",
"360",
"364",
"368",
"376",
"388",
"392",
"398",
"400",
"404",
"408",
"410",
"414",
"417",
"418",
"422",
"426",
"430",
"434",
"440",
"446",
"454",
"458",
"462",
"478",
"480",
"484",
"496",
"498",
"504",
"512",
"516",
"524",
"532",
"533",
"548",
"554",
"558",
"566",
"578",
"586",
"590",
"598",
"600",
"604",
"608",
"634",
"643",
"646",
"654",
"678",
"682",
"690",
"694",
"702",
"704",
"706",
"710",
"728",
"748",
"752",
"756",
"760",
"764",
"776",
"780",
"784",
"788",
"800",
"807",
"810",
"818",
"826",
"834",
"840",
"858",
"860",
"882",
"886",
"894",
"901",
"931",
"932",
"933",
"934",
"936",
"937",
"938",
"940",
"941",
"943",
"944",
"946",
"947",
"948",
"949",
"950",
"951",
"952",
"953",
"959",
"960",
"961",
"962",
"963",
"964",
"968",
"969",
"970",
"971",
"972",
"973",
"975",
"976",
"977",
"978",
"980",
"981",
"985",
"986",
"997",
"998",
"999",
)
banks = (
"Абсолют Банк",
"Авангард",
"Аверс",
"Автоградбанк",
"Автокредитбанк",
"Автоторгбанк",
"Агора",
"Агропромкредит",
"Агророс",
"Азиатско-Тихоокеанский Банк",
"Азия-Инвест Банк",
"Айсибиси Банк",
"АК Барс",
"Акибанк",
"Акрополь",
"Актив Банк",
"Акцепт",
"Александровский",
"Алеф-Банк",
"Алмазэргиэнбанк",
"Алтайкапиталбанк",
"Алтынбанк",
"Альба Альянс",
"Альтернатива",
"Альфа-Банк",
"Америкэн Экспресс Банк",
"Апабанк",
"Аресбанк",
"Арзамас",
"Байкалинвестбанк",
"Байкалкредобанк",
"Балаково-Банк",
"Балтинвестбанк",
'Банк "Санкт-Петербург"',
'Банк "СКС"',
"Банк 131",
"Банк Берейт",
"Банк Дом.рф",
"Банк Жилищного Финансирования",
"Банк Зенит",
"Банк Зенит Сочи",
"Банк Интеза",
"Банк Казани",
"Банк Корпоративного Финансирования",
"Банк Кредит Свисс (Москва)",
"Банк Оранжевый",
"Банк Оренбург",
"Банк ПСА Финанс Рус",
"Банк Раунд",
"Банк Реалист",
"Банк РМП",
"Банк РСИ",
"Банк СГБ",
"Банк Стандарт-Кредит",
"Банк Финам",
"Банк ЧБРР",
"ББР Банк",
"Белгородсоцбанк",
"Бест Эффортс Банк",
"Бизнес-Сервис-Траст",
"БКС Банк",
"БМ-Банк",
"БМВ Банк",
"БНП Париба Банк",
"Братский АНКБ",
"Быстробанк",
"Бэнк Оф Чайна",
"Вакобанк",
"Великие Луки Банк",
"Венец",
"Веста",
"Викинг",
"Витабанк",
"Вкабанк",
"Владбизнесбанк",
"Внешфинбанк",
"Возрождение",
"Вологжанин",
"Восточный",
"ВРБ",
"Всероссийский Банк Развития Регионов",
"ВТБ",
"Вуз-Банк",
"Вятич",
"Газнефтьбанк",
"Газпромбанк",
"Газтрансбанк",
"Газэнергобанк",
"Гарант-Инвест",
"Генбанк",
"Геобанк",
"Гефест",
"Глобус",
"Голдман Сакс Банк",
"Горбанк",
"Гута-Банк",
"Далена",
"Дальневосточный Банк",
"Денизбанк Москва",
"Держава",
"Дж.П. Морган Банк Интернешнл",
"Джей Энд Ти Банк",
"Дойче Банк",
"Долинск",
"Дом-Банк",
"Донкомбанк",
"Дон-Тексбанк",
"Дружба",
"ЕАТП Банк",
"Евразийский Банк",
"Евроазиатский Инвестиционный Банк",
"Евроальянс",
"Еврофинанс Моснарбанк",
"Екатеринбург",
"Енисейский Объединенный Банк",
"Ермак",
"Живаго Банк",
"Запсибкомбанк",
"Заречье",
"Заубер Банк",
"Земельный",
"Земский Банк",
"Зираат Банк (Москва)",
"Ижкомбанк",
"ИК Банк",
"Икано Банк",
"Инбанк",
"Инвестторгбанк",
"Инг Банк (Евразия)",
"Интерпрогрессбанк",
"Интерпромбанк",
"ИРС",
"ИС Банк",
"ИТ Банк",
"Итуруп",
"Ишбанк",
"Йошкар-Ола",
"Калуга",
"Камский Коммерческий Банк",
"Капитал",
"Кетовский Коммерческий Банк",
"Киви Банк",
"Классик Эконом Банк",
"Кольцо Урала",
"Коммерцбанк (Евразия)",
"Коммерческий Индо Банк",
"Консервативный Коммерческий Банк",
"Континенталь",
"Космос",
"Костромаселькомбанк",
"Кошелев-Банк",
"Креди Агриколь Киб",
"Кредит Европа Банк",
"Кредит Урал Банк",
"Кремлевский",
"Крокус-Банк",
"Крона-Банк",
"Кросна-Банк",
"КС Банк",
"Кубань Кредит",
"Кубаньторгбанк",
"Кузбассхимбанк",
"Кузнецкбизнесбанк",
"Кузнецкий",
"Кузнецкий Мост",
"Курган",
"Курскпромбанк",
"Кэб Эйчэнби Банк",
"Ланта-Банк",
"Левобережный",
"Локо-Банк",
"Майкопбанк",
"Майский",
"Максима",
"МБА-Москва",
"МВС Банк",
"Мегаполис",
"Международный Финансовый Клуб",
"Мерседес-Бенц Банк Рус",
"Металлинвестбанк",
"Металлург",
"Меткомбанк",
"Мидзухо Банк (Москва)",
"Мир Бизнес Банк",
"МКБ",
"Модульбанк",
"Морган Стэнли Банк",
"Морской Банк",
"Москва-Сити",
"Московский Индустриальный Банк",
"Московский Коммерческий Банк",
"Московский Кредитный Банк",
"Московский Нефтехимический Банк",
"Московский Областной Банк",
"Московское Ипотечное Агентство",
"Москоммерцбанк",
"МС Банк Рус",
"МСКБ",
"МСП Банк",
"МТИ Банк",
"МТС-Банк",
"Муниципальный Камчатпрофитбанк",
"Нальчик",
"Народный Банк",
"Народный Банк Тувы",
"Народный Доверительный Банк",
"Натиксис Банк",
"Национальный Банк Сбережений",
"Национальный Инвестиционно-Промышленный",
"Национальный Резервный Банк",
"Национальный Стандарт",
"НБД-Банк",
"Невастройинвест",
"Нейва",
"Нефтепромбанк",
"НИБ",
"Нижневолжский Коммерческий Банк",
"Нико-Банк",
"НК Банк",
"Новикомбанк",
"Новобанк",
"Новокиб",
"Новый Век",
"Новый Московский Банк",
"Нокссбанк",
"Ноосфера",
"Норвик Банк",
"Нордеа Банк",
"НС Банк",
"НФК",
"Объединенный Банк Республики",
"Объединенный Капитал",
"Онего",
"Оней Банк",
"Орбанк",
"Оргбанк",
"ОТП Банк",
"Первоуральскбанк",
"Первый Дортрансбанк",
"Первый Инвестиционный Банк",
"Первый Клиентский Банк",
"Пересвет",
"Пермь",
"Петербургский Социальный Ком. Банк",
"Платина",
"Плюс Банк",
"Пойдём!",
"Почта Банк",
"Почтобанк",
"Приморский Территориальный",
"Приморье",
"Примсоцбанк",
"Приобье",
"Прио-Внешторгбанк",
"Прокоммерцбанк",
"Проминвестбанк",
"Промсвязьбанк",
"Промсельхозбанк",
"Промтрансбанк",
"Профессионал Банк",
"Профессиональный Инвестиционный Банк",
"Прохладный",
"Развитие-Столица",
"Райффайзенбанк",
"РБА",
"Ренессанс Кредит",
"Рента-Банк",
"Ресо Кредит",
"Республиканский Кредитный Альянс",
"Ресурс-Траст",
"РН Банк",
"Росбанк",
"Росбизнесбанк",
"Росгосстрах Банк",
"Росдорбанк",
"Роскосмосбанк",
"Россельхозбанк",
"Российская Финансовая Корпорация",
"Российский Национальный Коммерческий Банк",
"Россита-Банк",
"Россия",
"Ростфинанс",
"Росэксимбанк",
"Роял Кредит Банк",
"Руна-Банк",
"Руснарбанк",
"Русский Банк Сбережений",
"Русский Региональный Банк",
"Русский Стандарт",
"Русфинанс Банк",
"Русьуниверсалбанк",
"РФИ Банк",
"Саммит Банк",
"Санкт-Петербургский Банк Инвестиций",
"Саратов",
"Саровбизнесбанк",
"Сбербанк России",
"Связь-Банк",
"СДМ-Банк",
"Севастопольский Морской Банк",
"Северный Морской Путь",
"Северный Народный Банк",
"Северстройбанк",
"Севзапинвестпромбанк",
"Сельмашбанк",
"Сервис Резерв",
"Сетелем Банк",
"СИАБ",
"Сибсоцбанк",
"Синко-Банк",
"Система",
"Сити Инвест Банк",
"Ситибанк",
"СКБ-Банк",
"Славия",
"Славянбанк",
"Славянский Кредит",
"Снежинский",
"Собинбанк",
"Совкомбанк",
"Современные Стандарты Бизнеса",
"Соколовский",
"Солид Банк",
"Солидарность",
"Социум-Банк",
"Союз",
"Спецстройбанк",
"Спиритбанк",
"Спутник",
"Ставропольпромстройбанк",
"Столичный Кредит",
"Стройлесбанк",
"Сумитомо Мицуи Рус Банк",
"Сургутнефтегазбанк",
"СЭБ Банк",
"Таврический Банк",
"Таганрогбанк",
"Тайдон",
"Тамбовкредитпромбанк",
"Татсоцбанк",
"Тексбанк",
"Тендер-Банк",
"Тимер Банк",
"Тинькофф Банк",
"Тойота Банк",
"Тольяттихимбанк",
"Томскпромстройбанк",
"Торжок",
"Транскапиталбанк",
"Трансстройбанк",
"Траст",
"Тэмбр-Банк",
"Углеметбанк",
"Унифондбанк",
"Уралпромбанк",
"Уралсиб",
"Уралфинанс",
"Уральский Банк Реконструкции и Развития",
"Уральский Финансовый Дом",
"УРИ Банк",
"Финанс Бизнес Банк",
"Финсервис",
"ФК Открытие",
"Фольксваген Банк Рус",
"Фора-Банк",
"Форбанк",
"Форштадт",
"Фридом Финанс",
"Хакасский Муниципальный Банк",
"Химик",
"ХКФ Банк",
"Хлынов",
"Центрально-Азиатский",
"Центр-Инвест",
"Центрокредит",
"ЦМРБанк",
"Чайна Констракшн Банк",
"Чайнасельхозбанк",
"Челиндбанк",
"Челябинвестбанк",
"Эйч-Эс-Би-Си Банк (РР)",
"Эко-Инвест",
"Экономбанк",
"Экси-Банк",
"Экспобанк",
"Экспресс-Волга",
"Элита",
"Эм-Ю-Эф-Джи Банк (Евразия)",
"Энергобанк",
"Энергомашбанк",
"Энерготрансбанк",
"Эс-Би-Ай Банк",
"Ю Би Эс Банк",
"Юг-Инвестбанк",
"ЮМК Банк",
"Юникредит Банк",
"Юнистрим",
"Яринтербанк",
)
def bic(self) -> str:
"""Generate a bank identification code (BIC).
BIC is a bank identification code that is used in Russia.
See https://ru.wikipedia.org/wiki/Банковский_идентификационный_код.
"""
region: str = self.random_element(self.region_codes)
department_code: str = self.numerify(self.random_element(self.department_code_formats))
credit_organization_code: str = self.numerify(self.random_element(self.credit_organization_code_formats))
return "04" + region + department_code + credit_organization_code
def correspondent_account(self) -> str:
"""Generate a correspondent account number.
Correspondent account is established to handle various financial
operations between financial institutions.
See https://ru.wikipedia.org/wiki/Корреспондентский_счёт.
"""
credit_organization_code = self.numerify(self.random_element(self.credit_organization_code_formats))
return "301" + self.numerify("#" * 14) + credit_organization_code
def checking_account(self) -> str:
"""Generate a checking account number.
Checking account is used in banks to handle financial operations of
clients.
See https://ru.wikipedia.org/wiki/Расчётный_счёт.
"""
account: str = self.random_element(self.checking_account_codes)
organization: str = self.random_element(self.organization_codes)
currency: str = self.random_element(self.currency_codes)
return account + organization + currency + self.numerify("#" * 12)
| Provider |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operators/boolean_operators.py | {
"start": 5054,
"end": 8554
} | class ____(BuiltinAutomationCondition[T_EntityKey]):
"""This class represents the condition that any of its children evaluate to true."""
operands: Sequence[AutomationCondition[T_EntityKey]]
@property
def description(self) -> str:
return "Any of"
@property
def name(self) -> str:
return "OR"
@property
def children(self) -> Sequence[AutomationCondition[T_EntityKey]]:
return self.operands
@property
def operator_type(self) -> OperatorType:
return "or"
@property
def requires_cursor(self) -> bool:
return False
async def evaluate( # pyright: ignore[reportIncompatibleMethodOverride]
self, context: AutomationContext[T_EntityKey]
) -> AutomationResult[T_EntityKey]:
true_subset = context.get_empty_subset()
coroutines = [
context.for_child_condition(
child_condition=child,
child_indices=[i],
candidate_subset=context.candidate_subset,
).evaluate_async()
for i, child in enumerate(self.children)
]
child_results = await asyncio.gather(*coroutines)
for child_result in child_results:
true_subset = true_subset.compute_union(child_result.true_subset)
return AutomationResult(context, true_subset, child_results=child_results)
@public
def replace(
self, old: Union[AutomationCondition, str], new: T_AutomationCondition
) -> Union[Self, T_AutomationCondition]:
"""Replaces all instances of ``old`` across any sub-conditions with ``new``.
If ``old`` is a string, then conditions with a label or name matching
that string will be replaced.
Args:
old (Union[AutomationCondition, str]): The condition to replace.
new (AutomationCondition): The condition to replace with.
"""
return (
new
if old in [self, self.name, self.get_label()]
else copy(self, operands=[child.replace(old, new) for child in self.operands])
)
@public
def allow(self, selection: "AssetSelection") -> "OrAutomationCondition":
"""Applies the ``.allow()`` method across all sub-conditions.
This impacts any dep-related sub-conditions.
Args:
selection (AssetSelection): The selection to allow.
"""
from dagster._core.definitions.asset_selection import AssetSelection
check.inst_param(selection, "selection", AssetSelection)
return copy(
self,
operands=[
child.allow(selection) if has_allow_ignore(child) else child
for child in self.operands
],
)
@public
def ignore(self, selection: "AssetSelection") -> "OrAutomationCondition":
"""Applies the ``.ignore()`` method across all sub-conditions.
This impacts any dep-related sub-conditions.
Args:
selection (AssetSelection): The selection to ignore.
"""
from dagster._core.definitions.asset_selection import AssetSelection
check.inst_param(selection, "selection", AssetSelection)
return copy(
self,
operands=[
child.ignore(selection) if has_allow_ignore(child) else child
for child in self.operands
],
)
@whitelist_for_serdes(storage_name="NotAssetCondition")
@record
| OrAutomationCondition |
python | kamyu104__LeetCode-Solutions | Python/shortest-path-in-a-hidden-grid.py | {
"start": 2100,
"end": 3778
} | class ____(object):
def findShortestPath(self, master):
"""
:type master: GridMaster
:rtype: int
"""
directions = {'L': (0, -1), 'R': (0, 1), 'U': (-1, 0), 'D': (1, 0)}
rollback = {'L': 'R', 'R': 'L', 'U': 'D', 'D': 'U'}
def dfs(pos, target, master, lookup, adj):
if target[0] is None and master.isTarget():
target[0] = pos
lookup.add(pos)
for d, (di, dj) in directions.iteritems():
if not master.canMove(d):
continue
nei = (pos[0]+di, pos[1]+dj)
adj[pos].add(nei)
adj[nei].add(pos)
if nei in lookup:
continue
master.move(d)
dfs(nei, target, master, lookup, adj)
master.move(rollback[d])
def bfs(adj, start, target):
q = [start]
lookup = set(q)
steps = 0
while q:
new_q = []
for pos in q:
if pos == target:
return steps
for nei in adj[pos]:
if nei in lookup:
continue
lookup.add(nei)
new_q.append(nei)
q = new_q
steps += 1
return -1
start = (0, 0)
target = [None]
adj = collections.defaultdict(set)
dfs(start, target, master, set(), adj)
if not target[0]:
return -1
return bfs(adj, start, target[0])
| Solution2 |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 33998,
"end": 42306
} | class ____(FooBase):
@override
def _run(self, bar: Any, bar_config: RunnableConfig, **kwargs: Any) -> Any:
return True
def test_tool_pass_config_non_pickleable() -> None:
tool = FooBaseNonPickleable()
args = {"bar": threading.Lock()}
tool_call = {
"name": tool.name,
"args": args,
"id": "abc123",
"type": "tool_call",
}
_ = tool.invoke(tool_call, {"configurable": {"foo": "not-bar"}})
assert tool_call["args"] == args
@pytest.mark.parametrize(
"tool", [foo, afoo, simple_foo, asimple_foo, FooBase(), AFooBase()]
)
async def test_async_tool_pass_config(tool: BaseTool) -> None:
assert (
await tool.ainvoke({"bar": "baz"}, {"configurable": {"foo": "not-bar"}})
== "baz"
)
def test_tool_description() -> None:
def foo(bar: str) -> str:
"""The foo."""
return bar
foo1 = tool(foo)
assert foo1.description == "The foo."
foo2 = StructuredTool.from_function(foo)
assert foo2.description == "The foo."
def test_tool_arg_descriptions() -> None:
def foo(bar: str, baz: int) -> str:
"""The foo.
Args:
bar: The bar.
baz: The baz.
"""
return bar
foo1 = tool(foo)
args_schema = _schema(foo1.args_schema)
assert args_schema == {
"title": "foo",
"type": "object",
"description": inspect.getdoc(foo),
"properties": {
"bar": {"title": "Bar", "type": "string"},
"baz": {"title": "Baz", "type": "integer"},
},
"required": ["bar", "baz"],
}
# Test parses docstring
foo2 = tool(foo, parse_docstring=True)
args_schema = _schema(foo2.args_schema)
expected = {
"title": "foo",
"description": "The foo.",
"type": "object",
"properties": {
"bar": {"title": "Bar", "description": "The bar.", "type": "string"},
"baz": {"title": "Baz", "description": "The baz.", "type": "integer"},
},
"required": ["bar", "baz"],
}
assert args_schema == expected
# Test parsing with run_manager does not raise error
def foo3( # noqa: D417
bar: str, baz: int, run_manager: CallbackManagerForToolRun | None = None
) -> str:
"""The foo.
Args:
bar: The bar.
baz: The baz.
"""
return bar
as_tool = tool(foo3, parse_docstring=True)
args_schema = _schema(as_tool.args_schema)
assert args_schema["description"] == expected["description"]
assert args_schema["properties"] == expected["properties"]
# Test parameterless tool does not raise error for missing Args section
# in docstring.
def foo4() -> str:
"""The foo."""
return "bar"
as_tool = tool(foo4, parse_docstring=True)
args_schema = _schema(as_tool.args_schema)
assert args_schema["description"] == expected["description"]
def foo5(run_manager: CallbackManagerForToolRun | None = None) -> str:
"""The foo."""
return "bar"
as_tool = tool(foo5, parse_docstring=True)
args_schema = _schema(as_tool.args_schema)
assert args_schema["description"] == expected["description"]
def test_docstring_parsing() -> None:
expected = {
"title": "foo",
"description": "The foo.",
"type": "object",
"properties": {
"bar": {"title": "Bar", "description": "The bar.", "type": "string"},
"baz": {"title": "Baz", "description": "The baz.", "type": "integer"},
},
"required": ["bar", "baz"],
}
# Simple case
def foo(bar: str, baz: int) -> str:
"""The foo.
Args:
bar: The bar.
baz: The baz.
"""
return bar
as_tool = tool(foo, parse_docstring=True)
args_schema = _schema(as_tool.args_schema)
assert args_schema["description"] == "The foo."
assert args_schema["properties"] == expected["properties"]
# Multi-line description
def foo2(bar: str, baz: int) -> str:
"""The foo.
Additional description here.
Args:
bar: The bar.
baz: The baz.
"""
return bar
as_tool = tool(foo2, parse_docstring=True)
args_schema2 = _schema(as_tool.args_schema)
assert args_schema2["description"] == "The foo. Additional description here."
assert args_schema2["properties"] == expected["properties"]
# Multi-line with Returns block
def foo3(bar: str, baz: int) -> str:
"""The foo.
Additional description here.
Args:
bar: The bar.
baz: The baz.
Returns:
description of returned value.
"""
return bar
as_tool = tool(foo3, parse_docstring=True)
args_schema3 = _schema(as_tool.args_schema)
args_schema3["title"] = "foo2"
assert args_schema2 == args_schema3
# Single argument
def foo4(bar: str) -> str:
"""The foo.
Args:
bar: The bar.
"""
return bar
as_tool = tool(foo4, parse_docstring=True)
args_schema4 = _schema(as_tool.args_schema)
assert args_schema4["description"] == "The foo."
assert args_schema4["properties"] == {
"bar": {"description": "The bar.", "title": "Bar", "type": "string"}
}
def test_tool_invalid_docstrings() -> None:
"""Test invalid docstrings."""
def foo3(bar: str, baz: int) -> str:
"""The foo."""
return bar
def foo4(bar: str, baz: int) -> str:
"""The foo.
Args:
bar: The bar.
baz: The baz.
""" # noqa: D205,D411 # We're intentionally testing bad formatting.
return bar
for func in {foo3, foo4}:
with pytest.raises(ValueError, match="Found invalid Google-Style docstring"):
_ = tool(func, parse_docstring=True)
def foo5(bar: str, baz: int) -> str: # noqa: D417
"""The foo.
Args:
banana: The bar.
monkey: The baz.
"""
return bar
with pytest.raises(
ValueError, match="Arg banana in docstring not found in function signature"
):
_ = tool(foo5, parse_docstring=True)
def test_tool_annotated_descriptions() -> None:
def foo(
bar: Annotated[str, "this is the bar"], baz: Annotated[int, "this is the baz"]
) -> str:
"""The foo.
Returns:
The bar only.
"""
return bar
foo1 = tool(foo)
args_schema = _schema(foo1.args_schema)
assert args_schema == {
"title": "foo",
"type": "object",
"description": inspect.getdoc(foo),
"properties": {
"bar": {"title": "Bar", "type": "string", "description": "this is the bar"},
"baz": {
"title": "Baz",
"type": "integer",
"description": "this is the baz",
},
},
"required": ["bar", "baz"],
}
def test_tool_call_input_tool_message_output() -> None:
tool_call = {
"name": "structured_api",
"args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
"id": "123",
"type": "tool_call",
}
tool = _MockStructuredTool()
expected = ToolMessage(
"1 True {'img': 'base64string...'}", tool_call_id="123", name="structured_api"
)
actual = tool.invoke(tool_call)
assert actual == expected
tool_call.pop("type")
with pytest.raises(ValidationError):
tool.invoke(tool_call)
@pytest.mark.parametrize("block_type", [*TOOL_MESSAGE_BLOCK_TYPES, "bad"])
def test_tool_content_block_output(block_type: str) -> None:
@tool
def my_tool(query: str) -> list[dict[str, Any]]:
"""Test tool."""
return [{"type": block_type, "foo": "bar"}]
tool_call = {
"type": "tool_call",
"name": "my_tool",
"args": {"query": "baz"},
"id": "call_abc123",
}
result = my_tool.invoke(tool_call)
assert isinstance(result, ToolMessage)
if block_type in TOOL_MESSAGE_BLOCK_TYPES:
assert result.content == [{"type": block_type, "foo": "bar"}]
else:
assert result.content == '[{"type": "bad", "foo": "bar"}]'
| FooBaseNonPickleable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/freshness_evaluator.py | {
"start": 704,
"end": 1261
} | class ____(ABC):
"""Abstract base class for freshness policy evaluators.
Do not implement this class, implement subclasses for each policy type.
"""
@abstractmethod
async def evaluate_freshness(
self, context: LoadingContext, node: BaseAssetNode
) -> FreshnessState:
"""Evaluate the freshness of an asset based on the freshness policy, returning the corresponding FreshnessState."""
raise NotImplementedError("Subclasses must implement this method")
# IMPROVEME [OPER-1796] move to oss
| FreshnessPolicyEvaluator |
python | huggingface__transformers | tests/models/table_transformer/test_modeling_table_transformer.py | {
"start": 7594,
"end": 22517
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TableTransformerModel,
TableTransformerForObjectDetection,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": TableTransformerModel, "object-detection": TableTransformerForObjectDetection}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
zero_init_hidden_state = True
test_torch_exportable = True
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "TableTransformerForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.min_size,
self.model_tester.max_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = TableTransformerModelTester(self)
self.config_tester = ConfigTester(self, config_class=TableTransformerConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_table_transformer_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_model(*config_and_inputs)
def test_table_transformer_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_object_detection_head_model(*config_and_inputs)
def test_table_transformer_no_timm_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_table_transformer_no_timm_backbone(*config_and_inputs)
@unittest.skip(reason="Table Transformer does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Table Transformer does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="Table Transformer does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Table Transformer is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="Table Transformer does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@slow
@unittest.skip(reason="TODO Niels: fix me!")
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = self.model_tester.decoder_seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
decoder_key_length = self.model_tester.decoder_seq_length
encoder_key_length = self.model_tester.encoder_seq_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "TableTransformerForObjectDetection":
correct_outlen += 2
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_retain_grad_hidden_states_attentions(self):
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_auxiliary_loss(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.auxiliary_loss = True
# only test for object detection and segmentation model
for model_class in self.all_model_classes[1:]:
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
outputs = model(**inputs)
self.assertIsNotNone(outputs.auxiliary_outputs)
self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = ["pixel_values", "pixel_mask", "decoder_attention_mask", "encoder_outputs"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["pixel_values", "pixel_mask"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
config.backbone_config = None
config.use_timm_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "TableTransformerForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "TableTransformerForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels + 1,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
def test_greyscale_images(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# use greyscale pixel values
inputs_dict["pixel_values"] = floats_tensor(
[self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size]
)
# let's set num_channels to 1
config.num_channels = 1
config.backbone_config.num_channels = 1
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertTrue(outputs)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_timm
@require_vision
@slow
| TableTransformerModelTest |
python | pandas-dev__pandas | pandas/tests/generic/test_to_xarray.py | {
"start": 3127,
"end": 4524
} | class ____:
def test_to_xarray_index_types(self, index_flat, request):
# MultiIndex is tested in test_to_xarray_with_multiindex
index = index_flat
ser = Series(range(len(index)), index=index, dtype="int64")
ser.index.name = "foo"
result = ser.to_xarray()
repr(result)
assert len(result) == len(index)
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, xarray.DataArray)
# idempotency
tm.assert_series_equal(result.to_series(), ser)
def test_to_xarray_empty(self):
ser = Series([], dtype=object)
ser.index.name = "foo"
result = ser.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, xarray.DataArray)
def test_to_xarray_with_multiindex(self):
mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
ser = Series(range(6), dtype="int64", index=mi)
result = ser.to_xarray()
assert len(result) == 2
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, xarray.DataArray)
res = result.to_series()
tm.assert_series_equal(res, ser)
| TestSeriesToXArray |
python | ray-project__ray | python/ray/data/_internal/block_builder.py | {
"start": 92,
"end": 1197
} | class ____(Generic[T]):
"""A builder class for blocks."""
@staticmethod
def for_block(block: Block) -> "BlockBuilder":
return BlockAccessor.for_block(block).builder()
def add(self, item: T) -> None:
"""Append a single row to the block being built."""
raise NotImplementedError
def add_block(self, block: Block) -> None:
"""Append an entire block to the block being built."""
raise NotImplementedError
def will_build_yield_copy(self) -> bool:
"""Whether building this block will yield a new block copy."""
raise NotImplementedError
def build(self) -> Block:
"""Build the block."""
raise NotImplementedError
def num_rows(self) -> int:
"""Return the number of rows added in the block."""
raise NotImplementedError
def get_estimated_memory_usage(self) -> int:
"""Return the estimated memory usage so far in bytes."""
raise NotImplementedError
def block_type(self) -> BlockType:
"""Return the block type."""
raise NotImplementedError
| BlockBuilder |
python | pypa__pipenv | pipenv/vendor/pipdeptree/_freeze.py | {
"start": 942,
"end": 2979
} | class ____:
"""
An adapter class for pip's `pipenv.patched.pip._internal.metadata.BaseDistribution` abstract class.
It essentially wraps over an importlib.metadata.Distribution object and provides just enough fields/methods found in
pip's `BaseDistribution` so that we can use `pipenv.patched.pip._internal.operations.freeze.FrozenRequirement.from_dist()`.
:param dist: Represents an `importlib.metadata.Distribution` object.
"""
DIRECT_URL_METADATA_NAME = "direct_url.json"
def __init__(self, dist: Distribution) -> None:
self._dist = dist
self._raw_name = dist.metadata["Name"]
self._version = Version(dist.version)
@property
def raw_name(self) -> str | Any:
return self._raw_name
@property
def version(self) -> Version:
return self._version
@property
def editable(self) -> bool:
return self.editable_project_location is not None
@property
def direct_url(self) -> DirectUrl | None:
result = None
json_str = self._dist.read_text(self.DIRECT_URL_METADATA_NAME)
try:
if json_str:
result = DirectUrl.from_json(json_str)
except (
UnicodeDecodeError,
JSONDecodeError,
DirectUrlValidationError,
):
return result
return result
@property
def editable_project_location(self) -> str | None:
direct_url = self.direct_url
if direct_url and direct_url.is_local_editable():
from pipenv.patched.pip._internal.utils.urls import url_to_path # noqa: PLC2701, PLC0415
return url_to_path(direct_url.url)
result = None
egg_link_path = egg_link_path_from_sys_path(self.raw_name)
if egg_link_path:
with Path(egg_link_path).open("r", encoding=locale.getpreferredencoding(False)) as f: # noqa: FBT003
result = f.readline().rstrip()
return result
__all__ = ["dist_to_frozen_repr"]
| PipBaseDistributionAdapter |
python | django__django | tests/admin_inlines/tests.py | {
"start": 59163,
"end": 70421
} | class ____(TestDataMixin, TestCase):
factory = RequestFactory()
def test_verbose_name_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name = "Non-verbose childs"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name = "Childs with verbose name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name = "Childs with verbose name plural"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name = "Childs with both verbose names"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
self.assertNotContains(response, "Add another Profile")
# Non-verbose model.
self.assertContains(
response,
(
'<h2 id="profile_set-heading" class="inline-heading">'
"Non-verbose childss</h2>"
),
html=True,
)
self.assertContains(response, "Add another Non-verbose child")
self.assertNotContains(
response,
'<h2 id="profile_set-heading" class="inline-heading">Profiles</h2>',
html=True,
)
# Model with verbose name.
self.assertContains(
response,
(
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Childs with verbose names</h2>"
),
html=True,
)
self.assertContains(response, "Add another Childs with verbose name")
self.assertNotContains(
response,
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Model with verbose name onlys</h2>",
html=True,
)
self.assertNotContains(response, "Add another Model with verbose name only")
# Model with verbose name plural.
self.assertContains(
response,
(
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Childs with verbose name plurals</h2>"
),
html=True,
)
self.assertContains(response, "Add another Childs with verbose name plural")
self.assertNotContains(
response,
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Model with verbose name plural only</h2>",
html=True,
)
# Model with both verbose names.
self.assertContains(
response,
(
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Childs with both verbose namess</h2>"
),
html=True,
)
self.assertContains(response, "Add another Childs with both verbose names")
self.assertNotContains(
response,
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Model with both - plural name</h2>",
html=True,
)
self.assertNotContains(response, "Add another Model with both - name")
def test_verbose_name_plural_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name_plural = "Non-verbose childs"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name_plural = "Childs with verbose name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name_plural = "Childs with verbose name plural"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name_plural = "Childs with both verbose names"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
# Non-verbose model.
self.assertContains(
response,
(
'<h2 id="profile_set-heading" class="inline-heading">'
"Non-verbose childs</h2>"
),
html=True,
)
self.assertContains(response, "Add another Profile")
self.assertNotContains(
response,
'<h2 id="profile_set-heading" class="inline-heading">Profiles</h2>',
html=True,
)
# Model with verbose name.
self.assertContains(
response,
(
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Childs with verbose name</h2>"
),
html=True,
)
self.assertContains(response, "Add another Model with verbose name only")
self.assertNotContains(
response,
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Model with verbose name onlys</h2>",
html=True,
)
# Model with verbose name plural.
self.assertContains(
response,
(
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Childs with verbose name plural</h2>"
),
html=True,
)
self.assertContains(response, "Add another Profile")
self.assertNotContains(
response,
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Model with verbose name plural only</h2>",
html=True,
)
# Model with both verbose names.
self.assertContains(
response,
(
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Childs with both verbose names</h2>"
),
html=True,
)
self.assertContains(response, "Add another Model with both - name")
self.assertNotContains(
response,
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Model with both - plural name</h2>",
html=True,
)
def test_both_verbose_names_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name = "Non-verbose childs - name"
verbose_name_plural = "Non-verbose childs - plural name"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name = "Childs with verbose name - name"
verbose_name_plural = "Childs with verbose name - plural name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name = "Childs with verbose name plural - name"
verbose_name_plural = "Childs with verbose name plural - plural name"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name = "Childs with both - name"
verbose_name_plural = "Childs with both - plural name"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
self.assertNotContains(response, "Add another Profile")
# Non-verbose model.
self.assertContains(
response,
(
'<h2 id="profile_set-heading" class="inline-heading">'
"Non-verbose childs - plural name</h2>"
),
html=True,
)
self.assertContains(response, "Add another Non-verbose childs - name")
self.assertNotContains(
response,
'<h2 id="profile_set-heading" class="inline-heading">Profiles</h2>',
html=True,
)
# Model with verbose name.
self.assertContains(
response,
(
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Childs with verbose name - plural name</h2>"
),
html=True,
)
self.assertContains(response, "Add another Childs with verbose name - name")
self.assertNotContains(
response,
'<h2 id="verbosenameprofile_set-heading" class="inline-heading">'
"Model with verbose name onlys</h2>",
html=True,
)
# Model with verbose name plural.
self.assertContains(
response,
(
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Childs with verbose name plural - plural name</h2>"
),
html=True,
)
self.assertContains(
response,
"Add another Childs with verbose name plural - name",
)
self.assertNotContains(
response,
'<h2 id="verbosenamepluralprofile_set-heading" class="inline-heading">'
"Model with verbose name plural only</h2>",
html=True,
)
# Model with both verbose names.
self.assertContains(
response,
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Childs with both - plural name</h2>",
html=True,
)
self.assertContains(response, "Add another Childs with both - name")
self.assertNotContains(
response,
'<h2 id="bothverbosenameprofile_set-heading" class="inline-heading">'
"Model with both - plural name</h2>",
html=True,
)
self.assertNotContains(response, "Add another Model with both - name")
@override_settings(ROOT_URLCONF="admin_inlines.urls")
| TestVerboseNameInlineForms |
python | huggingface__transformers | src/transformers/models/lightglue/modular_lightglue.py | {
"start": 11586,
"end": 12349
} | class ____(nn.Module):
def __init__(self, config: LightGlueConfig):
super().__init__()
self.projector = nn.Linear(2, config.descriptor_dim // config.num_attention_heads // 2, bias=False)
def forward(
self, keypoints: torch.Tensor, output_hidden_states: Optional[bool] = False
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
projected_keypoints = self.projector(keypoints)
embeddings = projected_keypoints.repeat_interleave(2, dim=-1)
cosines = torch.cos(embeddings)
sines = torch.sin(embeddings)
embeddings = (cosines, sines)
output = (embeddings, projected_keypoints) if output_hidden_states else (embeddings,)
return output
| LightGluePositionalEncoder |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_comms.py | {
"start": 1111,
"end": 2318
} | class ____:
"""Test Pydantic models used in task communication for proper validation."""
@pytest.mark.parametrize(
"object_to_mask",
[
{
"key_path": "/files/airflow-breeze-config/keys2/keys.json",
"scope": "https://www.googleapis.com/auth/cloud-platform",
"project": "project_id",
"num_retries": 6,
},
["iter1", "iter2", {"key": "value"}],
"string",
{
"key1": "value1",
},
],
)
def test_mask_secret_with_objects(self, object_to_mask):
mask_secret_object = MaskSecret(value=object_to_mask, name="test_secret")
assert mask_secret_object.value == object_to_mask
def test_mask_secret_with_list(self):
example_dict = ["test"]
mask_secret_object = MaskSecret(value=example_dict, name="test_secret")
assert mask_secret_object.value == example_dict
def test_mask_secret_with_iterable(self):
example_dict = ["test"]
mask_secret_object = MaskSecret(value=example_dict, name="test_secret")
assert mask_secret_object.value == example_dict
| TestCommsModels |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_gcs_to_gcs.py | {
"start": 2287,
"end": 43103
} | class ____:
"""
Tests the three use-cases for the wildcard operator. These are
no_prefix: *test_object
no_suffix: test_object*
prefix_and_suffix: test*object
Also tests the destination_object as prefix when the wildcard is used.
"""
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_no_prefix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_PREFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="", delimiter="test_object")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_no_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test_object", delimiter="")
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_with_replace_flag_false(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="test_object", delimiter="", match_glob=None),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_no_wildcard_with_replace_flag_false(self, mock_hook):
mock_hook.return_value.list.side_effect = [[], [SOURCE_OBJECT_NO_WILDCARD]]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix=SOURCE_OBJECT_NO_WILDCARD, delimiter=None, match_glob=None),
mock.call(DESTINATION_BUCKET, prefix=SOURCE_OBJECT_NO_WILDCARD, delimiter=None, match_glob=None),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_copy_file_with_exact_match(self, mock_hook):
SOURCE_FILES = [
"test_object.txt",
"test_object.txt.abc",
"test_object.txt.copy/",
"test_object.txt.folder/",
]
mock_hook.return_value.list.return_value = SOURCE_FILES
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
exact_match=True,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object.txt", delimiter=None, match_glob=None),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
mock_hook.return_value.rewrite.assert_has_calls(
[
mock.call(TEST_BUCKET, "test_object.txt", DESTINATION_BUCKET, "test_object.txt"),
]
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_copy_file_with_exact_match_destination(self, mock_hook):
SOURCE_FILES = [
"test_object.txt",
"test_object.txt.abc",
"test_object.txt.copy/",
"test_object.txt.folder/",
]
DESTINATION_OBJ = f"{DESTINATION_OBJECT_PREFIX}/test_object.txt"
mock_hook.return_value.list.return_value = SOURCE_FILES
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJ,
exact_match=True,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object.txt", delimiter=None, match_glob=None),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
mock_calls_rewrite = [
mock.call(TEST_BUCKET, "test_object.txt", DESTINATION_BUCKET, DESTINATION_OBJ),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_rewrite)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_prefix_and_suffix(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_MIDDLE,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix="test", delimiter="object")
# copy with wildcard
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_with_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "foo/bar/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "foo/bar/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_with_destination_object_retained_prefix(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object=f"{DESTINATION_OBJECT_PREFIX}/{SOURCE_OBJECT_WILDCARD_SUFFIX[:-1]}",
)
operator.execute(None)
mock_calls_retained = [
mock.call(
TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "foo/bar/test_object/file1.txt"
),
mock.call(
TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "foo/bar/test_object/file2.txt"
),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_retained)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_without_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_empty_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
destination_object="",
)
operator.execute(None)
mock_calls_empty = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_empty)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_with_destination_object_rename(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_SINGLE_FILE
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_NESTED,
destination_bucket=DESTINATION_BUCKET,
destination_object="test_file.txt",
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_file.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_wc_with_last_modified_time_with_all_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_wc_with_last_modified_time_with_one_true_cond(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
mock_hook.return_value.is_updated_after.side_effect = [True, False, False]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_wc_with_no_last_modified_time(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST[:-1]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_FILENAME,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=None,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "test_object/file2.txt"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_no_prefix_with_last_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, SOURCE_OBJECT_NO_WILDCARD, DESTINATION_BUCKET, SOURCE_OBJECT_NO_WILDCARD
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_no_prefix_with_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_before.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
maximum_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, SOURCE_OBJECT_NO_WILDCARD, DESTINATION_BUCKET, SOURCE_OBJECT_NO_WILDCARD
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_exe_last_modified_time_and_maximum_modified_time_with_true_cond(self, mock_hook):
mock_hook.return_value.is_updated_between.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, SOURCE_OBJECT_NO_WILDCARD, DESTINATION_BUCKET, SOURCE_OBJECT_NO_WILDCARD
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_no_prefix_with_no_last_modified_time(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=None,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, SOURCE_OBJECT_NO_WILDCARD, DESTINATION_BUCKET, SOURCE_OBJECT_NO_WILDCARD
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_no_prefix_with_last_modified_time_with_false_cond(self, mock_hook):
mock_hook.return_value.is_updated_after.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_not_called()
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_is_older_than_with_true_cond(self, mock_hook):
mock_hook.return_value.is_older_than.return_value = True
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=DESTINATION_BUCKET,
destination_object=SOURCE_OBJECT_NO_WILDCARD,
last_modified_time=MOD_TIME_1,
maximum_modified_time=MOD_TIME_2,
is_older_than=3600,
)
operator.execute(None)
mock_hook.return_value.rewrite.assert_called_once_with(
TEST_BUCKET, SOURCE_OBJECT_NO_WILDCARD, DESTINATION_BUCKET, SOURCE_OBJECT_NO_WILDCARD
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_more_than_1_wildcard(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_MULTIPLE_WILDCARDS,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
)
total_wildcards = operator.source_object.count(WILDCARD)
error_msg = f"Only one wildcard '[*]' is allowed in source_object parameter. Found {total_wildcards}"
with pytest.raises(AirflowException, match=error_msg):
operator.execute(None)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_with_empty_destination_bucket(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_NO_WILDCARD,
destination_bucket=None,
destination_object=DESTINATION_OBJECT_PREFIX,
)
with mock.patch.object(operator.log, "warning") as mock_warn:
operator.execute(None)
mock_warn.assert_called_once_with(
"destination_bucket is None. Defaulting it to source_bucket (%s)", TEST_BUCKET
)
assert operator.destination_bucket == operator.source_bucket
# Tests the use of delimiter and source object as list
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_empty_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_NO_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
TEST_BUCKET, prefix="", delimiter=None, match_glob=None
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_raises_exception_with_two_empty_list_inside_source_objects(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_TWO_EMPTY_STRING
)
with pytest.raises(AirflowException, match="You can't have two empty strings inside source_object"):
operator.execute(None)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_single_item_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_SINGLE_FILE
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
TEST_BUCKET, prefix=SOURCE_OBJECTS_SINGLE_FILE[0], delimiter=None, match_glob=None
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_multiple_items_in_source_objects(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_MULTIPLE_FILES
)
operator.execute(None)
mock_hook.return_value.list.assert_has_calls(
[
mock.call(TEST_BUCKET, prefix="test_object/file1.txt", delimiter=None, match_glob=None),
mock.call(TEST_BUCKET, prefix="test_object/file2.txt", delimiter=None, match_glob=None),
],
any_order=True,
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_a_delimiter(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
delimiter=DELIMITER,
)
operator.execute(None)
mock_hook.return_value.list.assert_called_once_with(
TEST_BUCKET, prefix="", delimiter=DELIMITER, match_glob=None
)
# COPY
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.side_effect = [[], [], [SOURCE_OBJECTS_LIST[2]]]
mock_hook.return_value.exists.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter=DELIMITER,
)
operator.execute(None)
mock_calls = [
mock.call(
TEST_BUCKET, SOURCE_OBJECTS_LIST[2], DESTINATION_BUCKET, DESTINATION_OBJECT + "file3.json"
),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_different_delimiter_and_destination_object(self, mock_hook):
mock_hook.return_value.list.side_effect = [[SOURCE_OBJECTS_LIST[0]], [SOURCE_OBJECTS_LIST[1]], []]
mock_hook.return_value.exists.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_LIST,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT,
delimiter=".txt",
)
operator.execute(None)
mock_calls = [
mock.call(
TEST_BUCKET, SOURCE_OBJECTS_LIST[0], DESTINATION_BUCKET, DESTINATION_OBJECT + "file1.txt"
),
mock.call(
TEST_BUCKET, SOURCE_OBJECTS_LIST[1], DESTINATION_BUCKET, DESTINATION_OBJECT + "file2.txt"
),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_executes_with_no_destination_bucket_and_no_destination_object(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
operator = GCSToGCSOperator(
task_id=TASK_ID, source_bucket=TEST_BUCKET, source_objects=SOURCE_OBJECTS_LIST
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, "test_object/file1.txt", TEST_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", TEST_BUCKET, "test_object/file2.txt"),
mock.call(TEST_BUCKET, "test_object/file3.json", TEST_BUCKET, "test_object/file3.json"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_wc_with_last_modified_time_with_all_true_cond_no_file(self, mock_hook):
mock_hook.return_value.list.return_value = SOURCE_OBJECTS_LIST
mock_hook.return_value.is_updated_after.side_effect = [True, True, True]
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_NO_FILE,
destination_bucket=DESTINATION_BUCKET,
last_modified_time=MOD_TIME_1,
)
operator.execute(None)
mock_calls_none = [
mock.call(TEST_BUCKET, "test_object/file1.txt", DESTINATION_BUCKET, "test_object/file1.txt"),
mock.call(TEST_BUCKET, "test_object/file2.txt", DESTINATION_BUCKET, "test_object/file2.txt"),
mock.call(TEST_BUCKET, "test_object/file3.json", DESTINATION_BUCKET, "test_object/file3.json"),
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls_none)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_wildcard_with_replace_flag_false_with_destination_object(self, mock_hook):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=SOURCE_OBJECT_WILDCARD_SUFFIX,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
replace=False,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, prefix="test_object", delimiter=""),
mock.call(DESTINATION_BUCKET, prefix="foo/bar", delimiter="", match_glob=None),
]
mock_hook.return_value.list.assert_has_calls(mock_calls)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_execute_source_object_required_flag_true(self, mock_hook):
mock_hook.return_value.exists.return_value = False
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=SOURCE_OBJECTS_SINGLE_FILE,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX,
source_object_required=True,
)
with pytest.raises(
AirflowException, match=f"{SOURCE_OBJECTS_SINGLE_FILE} does not exist in bucket {TEST_BUCKET}"
):
operator.execute(None)
@pytest.mark.parametrize(
(
"existing_objects",
"source_object",
"match_glob",
"exact_match",
"expected_source_objects",
"expected_destination_objects",
),
[
(["source/foo.txt"], "source/foo.txt", None, True, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source/foo.txt", None, False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source", None, False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source/", None, False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source/*", None, False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source/foo.*", None, False, ["source/foo.txt"], ["{prefix}/txt"]),
(["source/foo.txt"], "source/", "**/foo*", False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(["source/foo.txt"], "source/", "**/foo.txt", False, ["source/foo.txt"], ["{prefix}/foo.txt"]),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/foo.txt",
None,
True,
["source/foo.txt"],
["{prefix}/foo.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/foo.txt",
None,
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source",
None,
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/",
None,
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/*",
None,
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/foo.*",
None,
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/txt", "{prefix}/txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/",
"**/foo*",
False,
["source/foo.txt", "source/foo.txt.abc"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc"],
),
(
["source/foo.txt", "source/foo.txt.abc"],
"source/",
"**/foo.txt",
False,
["source/foo.txt"],
["{prefix}/foo.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/foo.txt",
None,
True,
["source/foo.txt"],
["{prefix}/foo.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/foo.txt",
None,
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc", "{prefix}/foo.txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source",
None,
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc", "{prefix}/foo.txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/",
None,
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc", "{prefix}/foo.txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/*",
None,
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc", "{prefix}/foo.txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/foo.*",
None,
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/txt", "{prefix}/txt.abc", "{prefix}/txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/",
"**/foo*",
False,
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
["{prefix}/foo.txt", "{prefix}/foo.txt.abc", "{prefix}/foo.txt/subfolder/file.txt"],
),
(
["source/foo.txt", "source/foo.txt.abc", "source/foo.txt/subfolder/file.txt"],
"source/",
"**/foo.txt",
False,
["source/foo.txt"],
["{prefix}/foo.txt"],
),
],
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_copy_files_into_a_folder(
self,
mock_hook,
existing_objects,
source_object,
match_glob,
exact_match,
expected_source_objects,
expected_destination_objects,
):
mock_hook.return_value.list.return_value = existing_objects
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_object=source_object,
destination_bucket=DESTINATION_BUCKET,
destination_object=DESTINATION_OBJECT_PREFIX + "/",
exact_match=exact_match,
match_glob=match_glob,
)
operator.execute(None)
mock_calls = [
mock.call(TEST_BUCKET, src, DESTINATION_BUCKET, dst.format(prefix=DESTINATION_OBJECT_PREFIX))
for src, dst in zip(expected_source_objects, expected_destination_objects)
]
mock_hook.return_value.rewrite.assert_has_calls(mock_calls)
@pytest.mark.parametrize(
("source_objects", "destination_object", "inputs", "outputs"),
(
(
SOURCE_OBJECTS_SINGLE_FILE,
None,
[Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0])],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0])],
),
(
SOURCE_OBJECTS_SINGLE_FILE,
"target.txt",
[Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0])],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="target.txt")],
),
(
SOURCE_OBJECTS_SINGLE_FILE,
"target_pre",
[Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0])],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="/")],
),
(
SOURCE_OBJECTS_SINGLE_FILE,
"dir/",
[Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0])],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="dir")],
),
(
SOURCE_OBJECTS_LIST,
"",
[
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[0]),
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[1]),
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[2]),
],
[
Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="/"),
],
),
(
[*SOURCE_OBJECTS_LIST, "dir/*"],
"parent/pre_",
[
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[0]),
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[1]),
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_LIST[2]),
Dataset(namespace=f"gs://{TEST_BUCKET}", name="dir"),
],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="parent")],
),
(
SOURCE_OBJECTS_NO_FILE,
"no_ending_slash",
[Dataset(namespace=f"gs://{TEST_BUCKET}", name="/")],
[Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="/")],
),
(
[
f"dir/{SOURCE_OBJECT_WILDCARD_PREFIX}",
f"dir/{SOURCE_OBJECT_WILDCARD_SUFFIX}",
f"dir/{SOURCE_OBJECT_WILDCARD_MIDDLE}",
f"dir/{SOURCE_OBJECT_WILDCARD_FILENAME}",
"dir/*",
"dir/",
"dir/pre_",
],
"/",
[
Dataset(namespace=f"gs://{TEST_BUCKET}", name="dir"),
],
[
Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="/"),
],
),
(
["", "dir/pre", SOURCE_OBJECTS_SINGLE_FILE[0]],
DESTINATION_OBJECT,
[
Dataset(namespace=f"gs://{TEST_BUCKET}", name="/"),
Dataset(namespace=f"gs://{TEST_BUCKET}", name="dir"),
Dataset(namespace=f"gs://{TEST_BUCKET}", name=SOURCE_OBJECTS_SINGLE_FILE[0]),
],
[
Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name=DESTINATION_OBJECT_PREFIX),
],
),
(
[
"",
"dir/",
],
None,
[
Dataset(namespace=f"gs://{TEST_BUCKET}", name="/"),
Dataset(namespace=f"gs://{TEST_BUCKET}", name="dir"),
],
[
Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="/"),
Dataset(namespace=f"gs://{DESTINATION_BUCKET}", name="dir"),
],
),
),
ids=(
"single file without output",
"single file with single file output",
"single file with prefix output",
"single file with dir output",
"multiple file with empty output",
"multiple file with prefix as output",
"empty prefix with prefix as output",
"directory + prefix or wildcard without output",
"mixed prefixes and file paths with output dir",
"empty prefix + directory without output",
),
)
@mock.patch("airflow.providers.google.cloud.transfers.gcs_to_gcs.GCSHook")
def test_get_openlineage_facets_on_complete(
self, mock_hook, source_objects, destination_object, inputs, outputs
):
if source_objects and any(WILDCARD in obj for obj in source_objects):
with pytest.warns(AirflowProviderDeprecationWarning, match="Usage of wildcard"):
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=source_objects,
destination_bucket=DESTINATION_BUCKET,
destination_object=destination_object,
)
else:
operator = GCSToGCSOperator(
task_id=TASK_ID,
source_bucket=TEST_BUCKET,
source_objects=source_objects,
destination_bucket=DESTINATION_BUCKET,
destination_object=destination_object,
)
operator.execute(None)
lineage = operator.get_openlineage_facets_on_complete(None)
assert len(lineage.inputs) == len(inputs)
assert len(lineage.outputs) == len(outputs)
assert all(element in lineage.inputs for element in inputs)
assert all(element in inputs for element in lineage.inputs)
assert all(element in lineage.outputs for element in outputs)
assert all(element in outputs for element in lineage.outputs)
| TestGoogleCloudStorageToCloudStorageOperator |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 2223,
"end": 2941
} | class ____(metaclass=abc.ABCMeta):
"""Base class for host configuration."""
@abc.abstractmethod
def get_defaults(self, context: HostContext) -> CompletionConfig:
"""Return the default settings."""
@abc.abstractmethod
def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
"""Apply default settings."""
@property
def is_managed(self) -> bool:
"""
True if the host is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
"""
return False
@dataclasses.dataclass
| HostConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1094899,
"end": 1095815
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable):
"""Represents a 'closed' event on any `Closable`."""
__schema__ = github_schema
__field_names__ = ("actor", "closable", "closer", "created_at", "state_reason")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
closable = sgqlc.types.Field(sgqlc.types.non_null(Closable), graphql_name="closable")
"""Object that was closed."""
closer = sgqlc.types.Field("Closer", graphql_name="closer")
"""Object which triggered the creation of this event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
state_reason = sgqlc.types.Field(IssueStateReason, graphql_name="stateReason")
"""The reason the issue state was changed to closed."""
| ClosedEvent |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 55179,
"end": 56481
} | class ____(Response):
"""
Response of queues.move_task_forward endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_forward"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"position": {
"description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskForwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property("position")
def position(self) -> Optional[int]:
return self._property_position
@position.setter
def position(self, value: Optional[int]) -> None:
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
| MoveTaskForwardResponse |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 60786,
"end": 62956
} | class ____(Parser):
def __init__(self, *args, **kwargs):
try:
arg1 = args[0]
except:
arg1 = 1
if isinstance(arg1,int):
super(LLkParser,self).__init__()
self.k = arg1
return
if isinstance(arg1,ParserSharedInputState):
super(LLkParser,self).__init__(arg1)
self.set_k(1,*args)
return
if isinstance(arg1,TokenBuffer):
super(LLkParser,self).__init__()
self.setTokenBuffer(arg1)
self.set_k(1,*args)
return
if isinstance(arg1,TokenStream):
super(LLkParser,self).__init__()
tokenBuf = TokenBuffer(arg1)
self.setTokenBuffer(tokenBuf)
self.set_k(1,*args)
return
### unknown argument
raise TypeError("LLkParser requires integer, " +
"ParserSharedInputStream or TokenStream argument")
def consume(self):
self.inputState.input.consume()
def LA(self,i):
return self.inputState.input.LA(i)
def LT(self,i):
return self.inputState.input.LT(i)
def set_k(self,index,*args):
try:
self.k = args[index]
except:
self.k = 1
def trace(self,ee,rname):
print(type(self))
self.traceIndent()
guess = ""
if self.inputState.guessing > 0:
guess = " [guessing]"
print((ee + rname + guess))
for i in range(1,self.k+1):
if i != 1:
print(", ")
if self.LT(i) :
v = self.LT(i).getText()
else:
v = "null"
print("LA(%s) == %s" % (i,v))
print("\n")
def traceIn(self,rname):
self.traceDepth += 1;
self.trace("> ", rname);
def traceOut(self,rname):
self.trace("< ", rname);
self.traceDepth -= 1;
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TreeParserSharedInputState ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| LLkParser |
python | tensorflow__tensorflow | tensorflow/python/compiler/xla/tests/jit_test.py | {
"start": 7443,
"end": 12926
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.build_as_function_and_v1_graph
def testCompilationInGradient(self):
with self.cached_session():
x = constant_op.constant([[3.]])
y_nc = math_ops.matmul(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.matmul(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertTrue(cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegex(ValueError, "[Nn]o attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (x ** 4) = 4 * (x ** 3)
self.assertAllClose([[108]], x_grads)
@test_util.build_as_function_and_v1_graph
def testCompilationGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope():
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope():
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testCompilationSeparateGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1_grad_GB",
grad_a2.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testPlaysNicelyWithDefun(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with the same
# _XlaScope as the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
@test_util.build_as_function_and_v1_graph
def testPlaysNicelyWithDefunSeparateGradientScope(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(
compiled=True, noinline=True, separate_compiled_gradients=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with a different
# _XlaScope from the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
if __name__ == "__main__":
test.main()
| CompilationEnabledInGradientTest |
python | google__python-fire | fire/console/text.py | {
"start": 2555,
"end": 2776
} | class ____(_TextTypes):
"""Defines text types that can be used for styling text."""
RESOURCE_NAME = 1
URL = 2
USER_INPUT = 3
COMMAND = 4
INFO = 5
URI = 6
OUTPUT = 7
PT_SUCCESS = 8
PT_FAILURE = 9
| TextTypes |
python | getsentry__sentry | src/sentry/integrations/vsts/webhooks.py | {
"start": 1587,
"end": 8798
} | class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
rate_limits = RateLimitConfig(
limit_overrides={
"POST": {
RateLimitCategory.IP: RateLimit(limit=100, window=1),
RateLimitCategory.USER: RateLimit(limit=100, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=100, window=1),
},
}
)
authentication_classes = ()
permission_classes = ()
def post(self, request: Request, *args: Any, **kwargs: Any) -> Response:
try:
data = request.data
event_type = data["eventType"]
external_id = get_vsts_external_id(data=request.data)
except Exception as e:
logger.info("vsts.invalid-webhook-payload", extra={"error": str(e)})
return self.respond(status=status.HTTP_400_BAD_REQUEST)
# https://docs.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#workitem.updated
if event_type == "workitem.updated":
integration = integration_service.get_integration(
provider=IntegrationProviderSlug.AZURE_DEVOPS.value,
external_id=external_id,
status=ObjectStatus.ACTIVE,
)
if integration is None:
logger.info(
"vsts.integration-in-webhook-payload-does-not-exist",
extra={"external_id": external_id, "event_type": event_type},
)
return self.respond(
{"detail": "Integration does not exist."}, status=status.HTTP_400_BAD_REQUEST
)
if not check_webhook_secret(request, integration, event_type):
return self.respond(status=status.HTTP_401_UNAUTHORIZED)
with IntegrationWebhookEvent(
interaction_type=IntegrationWebhookEventType.INBOUND_SYNC,
domain=IntegrationDomain.SOURCE_CODE_MANAGEMENT,
provider_key=IntegrationProviderSlug.AZURE_DEVOPS.value,
).capture():
handle_updated_workitem(data, integration)
return self.respond()
def check_webhook_secret(request: Request, integration: RpcIntegration, event_type: str) -> bool:
integration_secret = integration.metadata.get("subscription", {}).get("secret")
webhook_payload_secret = request.META.get("HTTP_SHARED_SECRET")
if integration_secret and webhook_payload_secret:
is_valid: bool = constant_time_compare(integration_secret, webhook_payload_secret)
key = "vsts.valid-webhook-secret" if is_valid else "vsts.invalid-webhook-secret"
else:
is_valid = False
key = "vsts.missing-webhook-secret"
logger.info(key, extra={"event_type": event_type, "integration_id": integration.id})
return is_valid
def handle_assign_to(
integration: RpcIntegration,
external_issue_key: str | None,
assigned_to: Mapping[str, str] | None,
) -> None:
if not assigned_to:
return
email: str | None = None
assign = False
new_value = assigned_to.get("newValue")
if new_value is not None:
email = parse_email(new_value)
if not email:
logger.info(
"vsts.failed-to-parse-email-in-handle-assign-to",
extra={
"error": "parse_error",
"integration_id": integration.id,
"assigned_to_values": assigned_to,
"external_issue_key": external_issue_key,
},
)
return # TODO(mgaeta): return if cannot parse email?
assign = True
sync_group_assignee_inbound(
integration=integration,
email=email,
external_issue_key=external_issue_key,
assign=assign,
)
# TODO(Gabe): Consolidate this with Jira's implementation, create DTO for status
# changes.
def handle_status_change(
integration: RpcIntegration,
external_issue_key: str,
status_change: Mapping[str, str] | None,
project: str | None,
) -> None:
with ProjectManagementEvent(
action_type=ProjectManagementActionType.INBOUND_STATUS_SYNC, integration=integration
).capture() as lifecycle:
if status_change is None:
return
org_integrations = integration_service.get_organization_integrations(
integration_id=integration.id
)
logging_context = {
"org_integration_ids": [oi.id for oi in org_integrations],
"integration_id": integration.id,
"status_change": status_change,
}
for org_integration in org_integrations:
installation = integration.get_installation(
organization_id=org_integration.organization_id
)
if isinstance(installation, IssueSyncIntegration):
installation.sync_status_inbound(
external_issue_key,
{
"new_state": status_change["newValue"],
# old_state is None when the issue is New
"old_state": status_change.get("oldValue"),
"project": project,
},
)
else:
lifecycle.record_halt(
ProjectManagementHaltReason.SYNC_NON_SYNC_INTEGRATION_PROVIDED,
extra=logging_context,
)
def handle_updated_workitem(data: Mapping[str, Any], integration: RpcIntegration) -> None:
project: str | None = None
try:
external_issue_key = data["resource"]["workItemId"]
except KeyError as e:
logger.info(
"vsts.updating-workitem-does-not-have-necessary-information",
extra={"error": str(e), "integration_id": integration.id},
)
return
try:
project = data["resourceContainers"]["project"]["id"]
except KeyError as e:
logger.info(
"vsts.updating-workitem-does-not-have-necessary-information",
extra={"error": str(e), "integration_id": integration.id},
)
try:
assigned_to = data["resource"]["fields"].get("System.AssignedTo")
status_change = data["resource"]["fields"].get("System.State")
except KeyError as e:
logger.info(
"vsts.updated-workitem-fields-not-passed",
extra={
"error": str(e),
"workItemId": data["resource"]["workItemId"],
"integration_id": integration.id,
"azure_project_id": project,
},
)
return # In the case that there are no fields sent, no syncing can be done
logger.info(
"vsts.updated-workitem-fields-correct",
extra={
"workItemId": data["resource"]["workItemId"],
"integration_id": integration.id,
"azure_project_id": project,
},
)
handle_assign_to(integration, external_issue_key, assigned_to)
handle_status_change(integration, external_issue_key, status_change, project)
| WorkItemWebhook |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_start_page02.py | {
"start": 315,
"end": 1015
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_start_page02.xlsx")
self.ignore_elements = {"xl/worksheets/sheet1.xml": ["<pageMargins"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with printer settings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_start_page(2)
worksheet.set_paper(9)
worksheet.vertical_dpi = 200
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | wandb__wandb | tests/unit_tests/test_launch/test_runner/test_kubernetes.py | {
"start": 6289,
"end": 7158
} | class ____:
"""Mocks a kubernetes batch API client."""
def __init__(self):
self.jobs = dict()
async def read_namespaced_job(self, name, namespace):
return self.jobs[name]
async def read_namespaced_job_status(self, name, namespace):
return self.jobs[name]
async def patch_namespaced_job(self, name, namespace, body):
if body.spec.suspend:
self.jobs[name].status.conditions = [MockDict({"type": "Suspended"})]
self.jobs[name].status.active -= 1
async def delete_namespaced_job(self, name, namespace):
del self.jobs[name]
async def list_namespaced_job(self, namespace, field_selector=None):
return [self.jobs[name] for name in self.jobs]
async def create_job(self, body):
self.jobs[body["metadata"]["generateName"]] = body
return body
| MockBatchApi |
python | pydata__xarray | xarray/tests/test_combine.py | {
"start": 29240,
"end": 43700
} | class ____:
def test_combine_by_coords(self):
objs = [Dataset({"x": [0]}), Dataset({"x": [1]})]
actual = combine_by_coords(objs)
expected = Dataset({"x": [0, 1]})
assert_identical(expected, actual)
actual = combine_by_coords([actual])
assert_identical(expected, actual)
objs = [Dataset({"x": [0, 1]}), Dataset({"x": [2]})]
actual = combine_by_coords(objs)
expected = Dataset({"x": [0, 1, 2]})
assert_identical(expected, actual)
def test_combine_by_coords_handles_non_sorted_variables(self):
# ensure auto_combine handles non-sorted variables
objs = [
Dataset({"x": ("a", [0]), "y": ("a", [0]), "a": [0]}),
Dataset({"x": ("a", [1]), "y": ("a", [1]), "a": [1]}),
]
actual = combine_by_coords(objs, join="outer")
expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1]), "a": [0, 1]})
assert_identical(expected, actual)
def test_combine_by_coords_multiple_variables(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
actual = combine_by_coords(objs, join="outer")
expected = Dataset({"x": [0, 1], "y": [0, 1]})
assert_equal(actual, expected)
def test_combine_by_coords_for_scalar_variables(self):
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
with pytest.raises(
ValueError, match=r"Could not find any dimension coordinates"
):
combine_by_coords(objs)
def test_combine_by_coords_requires_coord_or_index(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})]
with pytest.raises(
ValueError,
match=r"Every dimension requires a corresponding 1D coordinate and index",
):
combine_by_coords(objs)
def test_empty_input(self):
assert_identical(Dataset(), combine_by_coords([]))
@pytest.mark.parametrize(
"join, expected",
[
("outer", Dataset({"x": [0, 1], "y": [0, 1]})),
("inner", Dataset({"x": [0, 1], "y": []})),
("left", Dataset({"x": [0, 1], "y": [0]})),
("right", Dataset({"x": [0, 1], "y": [1]})),
],
)
def test_combine_coords_join(self, join, expected):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
actual = combine_nested(objs, concat_dim="x", join=join)
assert_identical(expected, actual)
def test_combine_coords_join_exact(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [1], "y": [1]})]
with pytest.raises(ValueError, match=r"cannot align.*join.*exact.*"):
combine_nested(objs, concat_dim="x", join="exact")
@pytest.mark.parametrize(
"combine_attrs, expected",
[
("drop", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={})),
(
"no_conflicts",
Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}),
),
("override", Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})),
(
lambda attrs, context: attrs[1],
Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "b": 2}),
),
],
)
def test_combine_coords_combine_attrs(self, combine_attrs, expected):
objs = [
Dataset({"x": [0], "y": [0]}, attrs={"a": 1}),
Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 2}),
]
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs=combine_attrs
)
assert_identical(expected, actual)
if combine_attrs == "no_conflicts":
objs[1].attrs["a"] = 2
with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"):
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs=combine_attrs
)
def test_combine_coords_combine_attrs_identical(self):
objs = [
Dataset({"x": [0], "y": [0]}, attrs={"a": 1}),
Dataset({"x": [1], "y": [1]}, attrs={"a": 1}),
]
expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1})
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs="identical"
)
assert_identical(expected, actual)
objs[1].attrs["b"] = 2
with pytest.raises(ValueError, match=r"combine_attrs='identical'"):
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs="identical"
)
def test_combine_nested_combine_attrs_drop_conflicts(self):
objs = [
Dataset({"x": [0], "y": [0]}, attrs={"a": 1, "b": 2, "c": 3}),
Dataset({"x": [1], "y": [1]}, attrs={"a": 1, "b": 0, "d": 3}),
]
expected = Dataset({"x": [0, 1], "y": [0, 1]}, attrs={"a": 1, "c": 3, "d": 3})
actual = combine_nested(
objs, concat_dim="x", join="outer", combine_attrs="drop_conflicts"
)
assert_identical(expected, actual)
@pytest.mark.parametrize(
"combine_attrs, attrs1, attrs2, expected_attrs, expect_exception",
[
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 1, "c": 3},
{"a": 1, "b": 2, "c": 3},
False,
),
("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 4, "c": 3},
{"a": 1, "b": 2, "c": 3},
True,
),
("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
(
"override",
{"a": 1, "b": 2},
{"a": 4, "b": 5, "c": 3},
{"a": 1, "b": 2},
False,
),
(
"drop_conflicts",
{"a": 1, "b": 2, "c": 3},
{"b": 1, "c": 3, "d": 4},
{"a": 1, "c": 3, "d": 4},
False,
),
],
)
def test_combine_nested_combine_attrs_variables(
self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception
):
"""check that combine_attrs is used on data variables and coords"""
data1 = Dataset(
{
"a": ("x", [1, 2], attrs1),
"b": ("x", [3, -1], attrs1),
"x": ("x", [0, 1], attrs1),
}
)
data2 = Dataset(
{
"a": ("x", [2, 3], attrs2),
"b": ("x", [-2, 1], attrs2),
"x": ("x", [2, 3], attrs2),
}
)
if expect_exception:
with pytest.raises(MergeError, match="combine_attrs"):
combine_by_coords([data1, data2], combine_attrs=combine_attrs)
else:
actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs)
expected = Dataset(
{
"a": ("x", [1, 2, 2, 3], expected_attrs),
"b": ("x", [3, -1, -2, 1], expected_attrs),
},
{"x": ("x", [0, 1, 2, 3], expected_attrs)},
)
assert_identical(actual, expected)
@pytest.mark.parametrize(
"combine_attrs, attrs1, attrs2, expected_attrs, expect_exception",
[
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 1, "c": 3},
{"a": 1, "b": 2, "c": 3},
False,
),
("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 4, "c": 3},
{"a": 1, "b": 2, "c": 3},
True,
),
("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
(
"override",
{"a": 1, "b": 2},
{"a": 4, "b": 5, "c": 3},
{"a": 1, "b": 2},
False,
),
(
"drop_conflicts",
{"a": 1, "b": 2, "c": 3},
{"b": 1, "c": 3, "d": 4},
{"a": 1, "c": 3, "d": 4},
False,
),
],
)
def test_combine_by_coords_combine_attrs_variables(
self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception
):
"""check that combine_attrs is used on data variables and coords"""
data1 = Dataset(
{"x": ("a", [0], attrs1), "y": ("a", [0], attrs1), "a": ("a", [0], attrs1)}
)
data2 = Dataset(
{"x": ("a", [1], attrs2), "y": ("a", [1], attrs2), "a": ("a", [1], attrs2)}
)
if expect_exception:
with pytest.raises(MergeError, match="combine_attrs"):
combine_by_coords([data1, data2], combine_attrs=combine_attrs)
else:
actual = combine_by_coords([data1, data2], combine_attrs=combine_attrs)
expected = Dataset(
{
"x": ("a", [0, 1], expected_attrs),
"y": ("a", [0, 1], expected_attrs),
"a": ("a", [0, 1], expected_attrs),
}
)
assert_identical(actual, expected)
def test_infer_order_from_coords(self):
data = create_test_data()
objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))]
actual = combine_by_coords(objs, data_vars="all")
expected = data
assert expected.broadcast_equals(actual) # type: ignore[arg-type]
with set_options(use_new_combine_kwarg_defaults=True):
actual = combine_by_coords(objs)
assert_identical(actual, expected)
def test_combine_leaving_bystander_dimensions(self):
# Check non-monotonic bystander dimension coord doesn't raise
# ValueError on combine (https://github.com/pydata/xarray/issues/3150)
ycoord = ["a", "c", "b"]
data = np.random.rand(7, 3)
ds1 = Dataset(
data_vars=dict(data=(["x", "y"], data[:3, :])),
coords=dict(x=[1, 2, 3], y=ycoord),
)
ds2 = Dataset(
data_vars=dict(data=(["x", "y"], data[3:, :])),
coords=dict(x=[4, 5, 6, 7], y=ycoord),
)
expected = Dataset(
data_vars=dict(data=(["x", "y"], data)),
coords=dict(x=[1, 2, 3, 4, 5, 6, 7], y=ycoord),
)
actual = combine_by_coords((ds1, ds2))
assert_identical(expected, actual)
def test_combine_by_coords_previously_failed(self):
# In the above scenario, one file is missing, containing the data for
# one year's data for one variable.
datasets = [
Dataset({"a": ("x", [0]), "x": [0]}),
Dataset({"b": ("x", [0]), "x": [0]}),
Dataset({"a": ("x", [1]), "x": [1]}),
]
expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]})
actual = combine_by_coords(datasets, join="outer")
assert_identical(expected, actual)
def test_combine_by_coords_still_fails(self):
# concat can't handle new variables (yet):
# https://github.com/pydata/xarray/issues/508
datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})]
with pytest.raises(ValueError):
combine_by_coords(datasets, "y") # type: ignore[arg-type]
def test_combine_by_coords_no_concat(self):
objs = [Dataset({"x": 0}), Dataset({"y": 1})]
actual = combine_by_coords(objs)
expected = Dataset({"x": 0, "y": 1})
assert_identical(expected, actual)
objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})]
actual = combine_by_coords(objs, compat="no_conflicts")
expected = Dataset({"x": 0, "y": 1, "z": 2})
assert_identical(expected, actual)
def test_check_for_impossible_ordering(self):
ds0 = Dataset({"x": [0, 1, 5]})
ds1 = Dataset({"x": [2, 3]})
with pytest.raises(
ValueError,
match=r"does not have monotonic global indexes along dimension x",
):
combine_by_coords([ds1, ds0])
def test_combine_by_coords_incomplete_hypercube(self):
# test that this succeeds with default fill_value
x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]})
x2 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [1], "x": [0]})
x3 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [1]})
actual = combine_by_coords([x1, x2, x3], join="outer")
expected = Dataset(
{"a": (("y", "x"), [[1, 1], [1, np.nan]])},
coords={"y": [0, 1], "x": [0, 1]},
)
assert_identical(expected, actual)
# test that this fails if fill_value is None
with pytest.raises(
ValueError, match="supplied objects do not form a hypercube"
):
combine_by_coords([x1, x2, x3], join="outer", fill_value=None)
def test_combine_by_coords_override_order(self) -> None:
# regression test for https://github.com/pydata/xarray/issues/8828
x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]})
x2 = Dataset(
{"a": (("y", "x"), [[2]]), "b": (("y", "x"), [[1]])},
coords={"y": [0], "x": [0]},
)
actual = combine_by_coords([x1, x2], compat="override")
assert_equal(actual["a"], actual["b"])
assert_equal(actual["a"], x1["a"])
actual = combine_by_coords([x2, x1], compat="override")
assert_equal(actual["a"], x2["a"])
| TestCombineDatasetsbyCoords |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/custom_param_types.py | {
"start": 2514,
"end": 3843
} | class ____(BetterChoice):
"""
This parameter allows to pass parameters that do not pass verification by choice. This is
useful to keep autocomplete working but also to allow some extra parameters that are dynamic,
for example allowing glob in package names for docs building.
"""
name = "NotVerifiedBetterChoice"
def convert(self, value: Any, param: Parameter | None, ctx: Context | None) -> Any:
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
return normed_value
| NotVerifiedBetterChoice |
python | pytorch__pytorch | torch/_dynamo/variables/base.py | {
"start": 8066,
"end": 8832
} | class ____(type):
all_subclasses: list[type] = []
def __instancecheck__(cls: type, instance: object) -> bool:
"""Make isinstance work with LazyVariableTracker"""
# This is super expensive - just having it costs over 4% of tracing
# time!
if (type(instance) is variables.LazyVariableTracker) and (
cls not in (VariableTracker, variables.LazyVariableTracker)
):
instance = instance.realize()
return type.__instancecheck__(cls, instance)
def __init__(
cls: type, name: str, bases: tuple[type, ...], attrs: dict[str, Any]
) -> None:
super().__init__(name, bases, attrs) # type: ignore[misc]
VariableTrackerMeta.all_subclasses.append(cls)
| VariableTrackerMeta |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_delete_event_param.py | {
"start": 232,
"end": 569
} | class ____(TypedDict, total=False):
item_id: Required[str]
"""The ID of the item to delete."""
type: Required[Literal["conversation.item.delete"]]
"""The event type, must be `conversation.item.delete`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| ConversationItemDeleteEventParam |
python | google__pytype | pytype/pytd/pep484_test.py | {
"start": 160,
"end": 1348
} | class ____(parser_test_base.ParserTest):
"""Test the visitors in optimize.py."""
def convert(self, t):
"""Run ConvertTypingToNative and return the result as a string."""
return pytd_utils.Print(t.Visit(pep484.ConvertTypingToNative(None)))
def test_convert_optional(self):
t = pytd.GenericType(
pytd.NamedType("typing.Optional"), (pytd.NamedType("str"),)
)
self.assertEqual(self.convert(t), "Optional[str]")
def test_convert_union(self):
t = pytd.GenericType(
pytd.NamedType("typing.Union"),
(pytd.NamedType("str"), pytd.NamedType("float")),
)
self.assertEqual(self.convert(t), "Union[str, float]")
def test_convert_list(self):
t = pytd.NamedType("typing.List")
self.assertEqual(self.convert(t), "list")
def test_convert_tuple(self):
t = pytd.NamedType("typing.Tuple")
self.assertEqual(self.convert(t), "tuple")
def test_convert_any(self):
t = pytd.NamedType("typing.Any")
self.assertEqual(self.convert(t), "Any")
def test_convert_anystr(self):
t = pytd.NamedType("typing.AnyStr")
self.assertEqual(self.convert(t), "AnyStr")
if __name__ == "__main__":
unittest.main()
| TestPEP484 |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 94948,
"end": 95877
} | class ____:
def __init__(self, *args):
if args_match( args, mupdf.FzDevice):
device, = args
self.this = device
elif args_match( args, Pixmap, None):
pm, clip = args
bbox = JM_irect_from_py( clip)
if mupdf.fz_is_infinite_irect( bbox):
self.this = mupdf.fz_new_draw_device( mupdf.FzMatrix(), pm)
else:
self.this = mupdf.fz_new_draw_device_with_bbox( mupdf.FzMatrix(), pm, bbox)
elif args_match( args, mupdf.FzDisplayList):
dl, = args
self.this = mupdf.fz_new_list_device( dl)
elif args_match( args, mupdf.FzStextPage, None):
tp, flags = args
opts = mupdf.FzStextOptions( flags)
self.this = mupdf.fz_new_stext_device( tp, opts)
else:
raise Exception( f'Unrecognised args for DeviceWrapper: {args!r}')
| DeviceWrapper |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/oscon/schedule_v4.py | {
"start": 541,
"end": 912
} | class ____:
__index = None
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return f'<{self.__class__.__name__} serial={self.serial!r}>'
@staticmethod
def fetch(key):
if Record.__index is None:
Record.__index = load()
return Record.__index[key]
# tag::SCHEDULE4_INIT[]
| Record |
python | pytransitions__transitions | tests/test_nesting.py | {
"start": 42780,
"end": 42849
} | class ____(TestSeparatorsBase):
separator = '/'
| TestSeparatorsSlash |
python | pypa__virtualenv | src/virtualenv/config/convert.py | {
"start": 931,
"end": 1061
} | class ____(TypeData):
def convert(self, value):
if not value:
return None
return str(value)
| NoneType |
python | Textualize__textual | src/textual/app.py | {
"start": 7475,
"end": 7689
} | class ____:
"""A file-like where writes go nowhere."""
def write(self, text: str) -> None:
pass
def flush(self) -> None:
pass
def isatty(self) -> bool:
return True
| _NullFile |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/version.py | {
"start": 21929,
"end": 22103
} | class ____(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
| SemanticVersion |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 25664,
"end": 25763
} | class ____(sgqlc.types.Scalar):
"""Git SSH string"""
__schema__ = github_schema
| GitSSHRemote |
python | django__django | tests/utils_tests/test_autoreload.py | {
"start": 16600,
"end": 17116
} | class ____(SimpleTestCase):
def test_mutates_error_files(self):
fake_method = mock.MagicMock(side_effect=RuntimeError())
wrapped = autoreload.check_errors(fake_method)
with mock.patch.object(autoreload, "_error_files") as mocked_error_files:
try:
with self.assertRaises(RuntimeError):
wrapped()
finally:
autoreload._exception = None
self.assertEqual(mocked_error_files.append.call_count, 1)
| TestCheckErrors |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/rule.py | {
"start": 3136,
"end": 5321
} | class ____(serializers.Serializer):
conditions = serializers.ListField(child=RuleNodeField(type="condition/event"), required=False)
filters = serializers.ListField(child=RuleNodeField(type="filter/event"), required=False)
actionMatch = serializers.ChoiceField(
choices=(("all", "all"), ("any", "any"), ("none", "none"))
)
filterMatch = serializers.ChoiceField(
choices=(("all", "all"), ("any", "any"), ("none", "none")), required=False
)
frequency = serializers.IntegerField(min_value=5, max_value=60 * 24 * 30)
def validate(self, attrs):
# ensure that if filters are passed in that a filterMatch is also supplied
filters = attrs.get("filters")
if filters:
filter_match = attrs.get("filterMatch")
if not filter_match:
raise serializers.ValidationError(
{
"filterMatch": "Must select a filter match (all, any, none) if filters are supplied."
}
)
# ensure that if a user has alert-filters enabled, they do not use old conditions
project = self.context["project"]
conditions = attrs.get("conditions", tuple())
project_has_filters = features.has("projects:alert-filters", project)
if project_has_filters:
old_conditions = [
condition for condition in conditions if condition["id"] in MIGRATED_CONDITIONS
]
if old_conditions:
raise serializers.ValidationError(
{
"conditions": "Conditions evaluating an event attribute, tag, or level are outdated please use an appropriate filter instead."
}
)
# ensure that if a user has alert-filters enabled, they do not use a 'none' match on conditions
if project_has_filters and attrs.get("actionMatch") == "none":
raise serializers.ValidationError(
{
"conditions": "The 'none' match on conditions is outdated and no longer supported."
}
)
return attrs
| RuleSetSerializer |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 8568,
"end": 11110
} | class ____(TestCase):
def test_basic(self):
seq = 'ABCDEF'
for n, expected in [
(3, [('A', 'B', 'C'), ('D', 'E', 'F')]),
(4, [('A', 'B', 'C', 'D'), ('E', 'F', None, None)]),
(5, [('A', 'B', 'C', 'D', 'E'), ('F', None, None, None, None)]),
(6, [('A', 'B', 'C', 'D', 'E', 'F')]),
(7, [('A', 'B', 'C', 'D', 'E', 'F', None)]),
]:
with self.subTest(n=n):
actual = list(mi.grouper(iter(seq), n))
self.assertEqual(actual, expected)
def test_fill(self):
seq = 'ABCDEF'
fillvalue = 'x'
for n, expected in [
(1, ['A', 'B', 'C', 'D', 'E', 'F']),
(2, ['AB', 'CD', 'EF']),
(3, ['ABC', 'DEF']),
(4, ['ABCD', 'EFxx']),
(5, ['ABCDE', 'Fxxxx']),
(6, ['ABCDEF']),
(7, ['ABCDEFx']),
]:
with self.subTest(n=n):
it = mi.grouper(
iter(seq), n, incomplete='fill', fillvalue=fillvalue
)
actual = [''.join(x) for x in it]
self.assertEqual(actual, expected)
def test_ignore(self):
seq = 'ABCDEF'
for n, expected in [
(1, ['A', 'B', 'C', 'D', 'E', 'F']),
(2, ['AB', 'CD', 'EF']),
(3, ['ABC', 'DEF']),
(4, ['ABCD']),
(5, ['ABCDE']),
(6, ['ABCDEF']),
(7, []),
]:
with self.subTest(n=n):
it = mi.grouper(iter(seq), n, incomplete='ignore')
actual = [''.join(x) for x in it]
self.assertEqual(actual, expected)
def test_strict(self):
seq = 'ABCDEF'
for n, expected in [
(1, ['A', 'B', 'C', 'D', 'E', 'F']),
(2, ['AB', 'CD', 'EF']),
(3, ['ABC', 'DEF']),
(6, ['ABCDEF']),
]:
with self.subTest(n=n):
it = mi.grouper(iter(seq), n, incomplete='strict')
actual = [''.join(x) for x in it]
self.assertEqual(actual, expected)
def test_strict_fails(self):
seq = 'ABCDEF'
for n in [4, 5, 7]:
with self.subTest(n=n):
with self.assertRaises(ValueError):
list(mi.grouper(iter(seq), n, incomplete='strict'))
def test_invalid_incomplete(self):
with self.assertRaises(ValueError):
list(mi.grouper('ABCD', 3, incomplete='bogus'))
| GrouperTests |
python | getsentry__sentry | src/sentry/integrations/bitbucket/webhook.py | {
"start": 3922,
"end": 6154
} | class ____(BitbucketWebhook):
# https://confluence.atlassian.com/bitbucket/event-payloads-740262817.html#EventPayloads-Push
@property
def event_type(self) -> IntegrationWebhookEventType:
return IntegrationWebhookEventType.PUSH
def __call__(self, event: Mapping[str, Any], **kwargs) -> None:
authors = {}
if not (repo := kwargs.get("repo")):
raise ValueError("Missing repo")
if not (organization := kwargs.get("organization")):
raise ValueError("Missing organization")
# while we're here, make sure repo data is up to date
self.update_repo_data(repo, event)
for change in event["push"]["changes"]:
for commit in change.get("commits", []):
if IntegrationRepositoryProvider.should_ignore_commit(commit["message"]):
continue
author_email = parse_email(commit["author"]["raw"])
# TODO(dcramer): we need to deal with bad values here, but since
# its optional, lets just throw it out for now
if author_email is None or len(author_email) > 75:
author = None
elif author_email not in authors:
authors[author_email] = author = CommitAuthor.objects.get_or_create(
organization_id=organization.id,
email=author_email,
defaults={"name": commit["author"]["raw"].split("<")[0].strip()},
)[0]
else:
author = authors[author_email]
try:
with transaction.atomic(router.db_for_write(Commit)):
Commit.objects.create(
repository_id=repo.id,
organization_id=organization.id,
key=commit["hash"],
message=commit["message"],
author=author,
date_added=parse_date(commit["date"]).astimezone(timezone.utc),
)
except IntegrityError:
pass
@region_silo_endpoint
| PushEventWebhook |
python | django__django | tests/admin_views/tests.py | {
"start": 172265,
"end": 176059
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.b1 = Book.objects.create(name="Lærdommer")
cls.p1 = Promo.objects.create(name="<Promo for Lærdommer>", book=cls.b1)
cls.chap1 = Chapter.objects.create(
title="Norske bostaver æøå skaper problemer",
content="<p>Svært frustrerende med UnicodeDecodeErro</p>",
book=cls.b1,
)
cls.chap2 = Chapter.objects.create(
title="Kjærlighet",
content="<p>La kjærligheten til de lidende seire.</p>",
book=cls.b1,
)
cls.chap3 = Chapter.objects.create(
title="Kjærlighet", content="<p>Noe innhold</p>", book=cls.b1
)
cls.chap4 = ChapterXtra1.objects.create(
chap=cls.chap1, xtra="<Xtra(1) Norske bostaver æøå skaper problemer>"
)
cls.chap5 = ChapterXtra1.objects.create(
chap=cls.chap2, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap6 = ChapterXtra1.objects.create(
chap=cls.chap3, xtra="<Xtra(1) Kjærlighet>"
)
cls.chap7 = ChapterXtra2.objects.create(
chap=cls.chap1, xtra="<Xtra(2) Norske bostaver æøå skaper problemer>"
)
cls.chap8 = ChapterXtra2.objects.create(
chap=cls.chap2, xtra="<Xtra(2) Kjærlighet>"
)
cls.chap9 = ChapterXtra2.objects.create(
chap=cls.chap3, xtra="<Xtra(2) Kjærlighet>"
)
def setUp(self):
self.client.force_login(self.superuser)
def test_unicode_edit(self):
"""
A test to ensure that POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": (
"<p>Svært frustrerende med UnicodeDecodeError</p>"
),
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": (
"<p>La kjærligheten til de lidende seire.</p>"
),
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(
reverse("admin:admin_views_book_change", args=(self.b1.pk,)), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
The delete_view handles non-ASCII characters
"""
delete_dict = {"post": "yes"}
delete_url = reverse("admin:admin_views_book_delete", args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse("admin:admin_views_book_changelist"))
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminViewUnicodeTest |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/credentials.py | {
"start": 2518,
"end": 20032
} | class ____(CredentialsBlock):
"""
Block used to manage authentication with GCP. Google authentication is
handled via the `google.oauth2` module or through the CLI.
Specify either one of service `account_file` or `service_account_info`; if both
are not specified, the client will try to detect the credentials following Google's
[Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials).
See Google's [Authentication documentation](https://cloud.google.com/docs/authentication#service-accounts)
for details on inference and recommended authentication patterns.
Attributes:
service_account_file: Path to the service account JSON keyfile.
service_account_info: The contents of the keyfile as a dict.
Example:
Load GCP credentials stored in a `GCP Credentials` Block:
```python
from prefect_gcp import GcpCredentials
gcp_credentials_block = GcpCredentials.load("BLOCK_NAME")
```
""" # noqa
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/10424e311932e31c477ac2b9ef3d53cefbaad708-250x250.png" # noqa
_block_type_name = "GCP Credentials"
_documentation_url = "https://docs.prefect.io/integrations/prefect-gcp" # noqa: E501
service_account_file: Optional[Path] = Field(
default=None, description="Path to the service account JSON keyfile."
)
service_account_info: Optional[SecretDict] = Field(
default=None, description="The contents of the keyfile as a dict."
)
project: Optional[str] = Field(
default=None, description="The GCP project to use for the client."
)
_service_account_email: Optional[str] = None
def __hash__(self):
return hash(
(
hash(self.service_account_file),
hash(frozenset(self.service_account_info.get_secret_value().items()))
if self.service_account_info
else None,
hash(self.project),
hash(self._service_account_email),
)
)
@model_validator(mode="after")
def _provide_one_service_account_source(self):
"""
Ensure that only a service account file or service account info ias provided.
"""
if self.service_account_info and self.service_account_file:
raise ValueError(
"Only one of service_account_info or service_account_file "
"can be specified at once"
)
return self
@field_validator("service_account_file")
@classmethod
def _check_service_account_file(cls, file):
"""Get full path of provided file and make sure that it exists."""
if not file:
return file
service_account_file = Path(file).expanduser()
if not service_account_file.exists():
raise ValueError("The provided path to the service account is invalid")
return service_account_file
@field_validator("service_account_info", mode="before")
@classmethod
def _convert_json_string_json_service_account_info(cls, value):
"""
Converts service account info provided as a json formatted string
to a dictionary
"""
if isinstance(value, str):
try:
service_account_info = json.loads(value)
return service_account_info
except Exception:
raise ValueError("Unable to decode service_account_info")
else:
return value
def block_initialization(self):
credentials = self.get_credentials_from_service_account()
if self.project is None:
if self.service_account_info or self.service_account_file:
credentials_project = credentials.project_id
# google.auth.default using gcloud auth application-default login
elif credentials.quota_project_id:
credentials_project = credentials.quota_project_id
# compute-assigned service account via GCP metadata server
else:
_, credentials_project = google.auth.default()
self.project = credentials_project
if hasattr(credentials, "service_account_email"):
self._service_account_email = credentials.service_account_email
def get_credentials_from_service_account(self) -> Credentials:
"""
Helper method to serialize credentials by using either
service_account_file or service_account_info.
"""
if self.service_account_info:
credentials = Credentials.from_service_account_info(
self.service_account_info.get_secret_value(),
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
elif self.service_account_file:
credentials = Credentials.from_service_account_file(
self.service_account_file,
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
else:
credentials, _ = google.auth.default()
return credentials
def get_access_token(self):
"""
See: https://stackoverflow.com/a/69107745
Also: https://www.jhanley.com/google-cloud-creating-oauth-access-tokens-for-rest-api-calls/
""" # noqa
request = google.auth.transport.requests.Request()
credentials = self.get_credentials_from_service_account()
credentials.refresh(request)
return credentials.token
def get_client(
self,
client_type: Union[str, ClientType],
**get_client_kwargs: Dict[str, Any],
) -> Any:
"""
Helper method to dynamically get a client type.
Args:
client_type: The name of the client to get.
**get_client_kwargs: Additional keyword arguments to pass to the
`get_*_client` method.
Returns:
An authenticated client.
Raises:
ValueError: if the client is not supported.
"""
if isinstance(client_type, str):
client_type = ClientType(client_type)
client_type = client_type.value
get_client_method = getattr(self, f"get_{client_type}_client")
return get_client_method(**get_client_kwargs)
@_raise_help_msg("cloud_storage")
def get_cloud_storage_client(
self, project: Optional[str] = None
) -> "StorageClient":
"""
Gets an authenticated Cloud Storage client.
Args:
project: Name of the project to use; overrides the base
class's project if provided.
Returns:
An authenticated Cloud Storage client.
Examples:
Gets a GCP Cloud Storage client from a path.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_file = "~/.secrets/prefect-service-account.json"
client = GcpCredentials(
service_account_file=service_account_file
).get_cloud_storage_client()
example_get_client_flow()
```
Gets a GCP Cloud Storage client from a dictionary.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_info = {
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "private_key",
"client_email": "client_email",
"client_id": "client_id",
"auth_uri": "auth_uri",
"token_uri": "token_uri",
"auth_provider_x509_cert_url": "auth_provider_x509_cert_url",
"client_x509_cert_url": "client_x509_cert_url"
}
client = GcpCredentials(
service_account_info=service_account_info
).get_cloud_storage_client()
example_get_client_flow()
```
"""
credentials = self.get_credentials_from_service_account()
# override class project if method project is provided
project = project or self.project
storage_client = StorageClient(credentials=credentials, project=project)
return storage_client
@_raise_help_msg("bigquery")
def get_bigquery_client(
self, project: Optional[str] = None, location: Optional[str] = None
) -> "BigQueryClient":
"""
Gets an authenticated BigQuery client.
Args:
project: Name of the project to use; overrides the base
class's project if provided.
location: Location to use.
Returns:
An authenticated BigQuery client.
Examples:
Gets a GCP BigQuery client from a path.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_file = "~/.secrets/prefect-service-account.json"
client = GcpCredentials(
service_account_file=service_account_file
).get_bigquery_client()
example_get_client_flow()
```
Gets a GCP BigQuery client from a dictionary.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_info = {
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "private_key",
"client_email": "client_email",
"client_id": "client_id",
"auth_uri": "auth_uri",
"token_uri": "token_uri",
"auth_provider_x509_cert_url": "auth_provider_x509_cert_url",
"client_x509_cert_url": "client_x509_cert_url"
}
client = GcpCredentials(
service_account_info=service_account_info
).get_bigquery_client()
example_get_client_flow()
```
"""
credentials = self.get_credentials_from_service_account()
# override class project if method project is provided
project = project or self.project
big_query_client = BigQueryClient(
credentials=credentials, project=project, location=location
)
return big_query_client
@_raise_help_msg("secret_manager")
def get_secret_manager_client(self) -> "SecretManagerServiceClient":
"""
Gets an authenticated Secret Manager Service client.
Returns:
An authenticated Secret Manager Service client.
Examples:
Gets a GCP Secret Manager client from a path.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_file = "~/.secrets/prefect-service-account.json"
client = GcpCredentials(
service_account_file=service_account_file
).get_secret_manager_client()
example_get_client_flow()
```
Gets a GCP Cloud Storage client from a dictionary.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_info = {
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "private_key",
"client_email": "client_email",
"client_id": "client_id",
"auth_uri": "auth_uri",
"token_uri": "token_uri",
"auth_provider_x509_cert_url": "auth_provider_x509_cert_url",
"client_x509_cert_url": "client_x509_cert_url"
}
client = GcpCredentials(
service_account_info=service_account_info
).get_secret_manager_client()
example_get_client_flow()
```
"""
credentials = self.get_credentials_from_service_account()
# doesn't accept project; must pass in project in tasks
secret_manager_client = SecretManagerServiceClient(credentials=credentials)
return secret_manager_client
@_raise_help_msg("aiplatform")
def get_job_service_client(
self, client_options: Union[Dict[str, Any], ClientOptions] = None
) -> "JobServiceClient":
"""
Gets an authenticated Job Service client for Vertex AI.
Returns:
An authenticated Job Service client.
Examples:
Gets a GCP Job Service client from a path.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_file = "~/.secrets/prefect-service-account.json"
client = GcpCredentials(
service_account_file=service_account_file
).get_job_service_client()
example_get_client_flow()
```
Gets a GCP Cloud Storage client from a dictionary.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_info = {
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "private_key",
"client_email": "client_email",
"client_id": "client_id",
"auth_uri": "auth_uri",
"token_uri": "token_uri",
"auth_provider_x509_cert_url": "auth_provider_x509_cert_url",
"client_x509_cert_url": "client_x509_cert_url"
}
client = GcpCredentials(
service_account_info=service_account_info
).get_job_service_client()
example_get_client_flow()
```
"""
if isinstance(client_options, dict):
client_options = from_dict(client_options)
credentials = self.get_credentials_from_service_account()
return JobServiceClient(credentials=credentials, client_options=client_options)
@_raise_help_msg("aiplatform")
def get_job_service_async_client(
self, client_options: Union[Dict[str, Any], ClientOptions] = None
) -> "JobServiceAsyncClient":
"""
Gets an authenticated Job Service async client for Vertex AI.
Returns:
An authenticated Job Service async client.
Examples:
Gets a GCP Job Service client from a path.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_file = "~/.secrets/prefect-service-account.json"
client = GcpCredentials(
service_account_file=service_account_file
).get_job_service_async_client()
example_get_client_flow()
```
Gets a GCP Cloud Storage client from a dictionary.
```python
from prefect import flow
from prefect_gcp.credentials import GcpCredentials
@flow()
def example_get_client_flow():
service_account_info = {
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "private_key",
"client_email": "client_email",
"client_id": "client_id",
"auth_uri": "auth_uri",
"token_uri": "token_uri",
"auth_provider_x509_cert_url": "auth_provider_x509_cert_url",
"client_x509_cert_url": "client_x509_cert_url"
}
client = GcpCredentials(
service_account_info=service_account_info
).get_job_service_async_client()
example_get_client_flow()
```
"""
if isinstance(client_options, dict):
client_options = from_dict(client_options)
return _get_job_service_async_client_cached(
self, tuple(client_options.__dict__.items())
)
| GcpCredentials |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass9.py | {
"start": 340,
"end": 413
} | class ____(MixinB, ClassA):
pass
ClassB(myproperty="myproperty")
| ClassB |
python | Pylons__pyramid | tests/test_config/test_adapters.py | {
"start": 13143,
"end": 13583
} | class ____:
def __init__(self, resource, request):
self.resource = resource
self.request = request
def predicate_maker(name):
class Predicate:
def __init__(self, val, config):
self.val = val
def phash(self):
return 'phash'
text = phash
def __call__(self, event):
return getattr(event, name, None) == self.val
return Predicate
| DummyResourceURL |
python | facebook__pyre-check | tools/incremental_test/tests/specification_tests.py | {
"start": 575,
"end": 8044
} | class ____(unittest.TestCase):
def test_create_repository_state(self) -> None:
self.assertEqual(
RepositoryState.from_json(
{"kind": "hg", "repository": ".", "commit_hash": "facefacefaceb000"}
),
HgRepositoryState(repository=Path("."), commit_hash="facefacefaceb000"),
)
files = {"a.py": "print('a')", "b.py": "print('b')"}
self.assertEqual(
RepositoryState.from_json({"kind": "file", "files": files}),
FileRepositoryState(files),
)
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json({})
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json({"kind": "hg", "commit_hash": "facefacefaceb000"})
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json({"kind": "hg", "repository": "."})
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json(
{"repository": ".", "commit_hash": "facefacefaceb000"}
)
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json(
{"kind": "hg", "repository": 42, "commit_hash": "facefacefaceb000"}
)
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json({"kind": "file", "no_files": ""})
with self.assertRaises(InvalidSpecificationException):
RepositoryState.from_json({"kind": "file", "files": "not_a_list"})
def test_create_repository_update(self) -> None:
self.assertEqual(
RepositoryUpdate.from_json(
{"kind": "hg", "commit_hash": "facefacefaceb000"}
),
HgRepositoryUpdate("facefacefaceb000"),
)
self.assertEqual(
RepositoryUpdate.from_json(
{"kind": "patch", "patch": "my_patch", "patch_flags": "my_flags"}
),
PatchRepositoryUpdate("my_patch", "my_flags"),
)
changes = {"a.py": "print('a')", "b.py": "print('b')"}
removals = ["c.py", "d.py"]
self.assertEqual(
RepositoryUpdate.from_json(
{"kind": "file", "changes": changes, "removals": removals}
),
FileRepositoryUpdate(changes=changes, removals=removals),
)
self.assertEqual(
RepositoryUpdate.from_json(
{
"kind": "batch",
"updates": [
{"kind": "hg", "commit_hash": "my_hash"},
{"kind": "patch", "patch": "my_patch"},
],
}
),
BatchRepositoryUpdate(
[
HgRepositoryUpdate(commit_hash="my_hash"),
PatchRepositoryUpdate(patch="my_patch", patch_flags=""),
]
),
)
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "foo"})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "hg", "commit_hash_missing": ""})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "patch", "patch_missing": ""})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "file", "changes": "not_dict"})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "file", "removals": "not_list"})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "file", "no_file_change": ""})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "batch", "updates_missing": ""})
with self.assertRaises(InvalidSpecificationException):
RepositoryUpdate.from_json({"kind": "batch", "updates": "not_list"})
def test_create_specification(self) -> None:
self.assertEqual(
Specification.from_json(
{
"old_state": {
"kind": "hg",
"repository": ".",
"commit_hash": "old_hash",
},
"new_state": {"kind": "hg", "commit_hash": "new_hash"},
"foo": ".",
}
),
Specification(
old_state=HgRepositoryState(
repository=Path("."), commit_hash="old_hash"
),
new_state=HgRepositoryUpdate(commit_hash="new_hash"),
pyre_check_pyre_options="",
pyre_check_options="",
pyre_start_pyre_options="",
pyre_start_options="",
pyre_incremental_pyre_options="",
pyre_incremental_options="",
),
)
self.assertEqual(
Specification.from_json(
{
"old_state": {
"kind": "hg",
"repository": ".",
"commit_hash": "old_hash",
},
"new_state": {"kind": "hg", "commit_hash": "new_hash"},
"pyre_check_pyre_options": "--option1",
"pyre_check_options": "--option2",
"pyre_start_pyre_options": "--option3",
"pyre_start_options": "--option4",
"pyre_incremental_pyre_options": "--option5",
"pyre_incremental_options": "--option6",
}
),
Specification(
old_state=HgRepositoryState(
repository=Path("."), commit_hash="old_hash"
),
new_state=HgRepositoryUpdate(commit_hash="new_hash"),
pyre_check_pyre_options="--option1",
pyre_check_options="--option2",
pyre_start_pyre_options="--option3",
pyre_start_options="--option4",
pyre_incremental_pyre_options="--option5",
pyre_incremental_options="--option6",
),
)
with self.assertRaises(InvalidSpecificationException):
Specification.from_json({})
with self.assertRaises(InvalidSpecificationException):
Specification.from_json(
{
"old_state": {
"kind": "hg",
"repository": 42,
"commit_hash": "old_hash",
}
}
)
with self.assertRaises(InvalidSpecificationException):
Specification.from_json({"old_state": {"kind": "hg", "repository": "foo"}})
with self.assertRaises(InvalidSpecificationException):
Specification.from_json(
{
"old_state": {
"kind": "hg",
"repository": ".",
"commit_hash": "old_hash",
}
}
)
| SpecificationTest |
python | huggingface__transformers | src/transformers/tokenization_utils_sentencepiece.py | {
"start": 1276,
"end": 12591
} | class ____(PreTrainedTokenizer):
"""
Base class for SentencePiece-based tokenizers that load from sentencepiece.model files.
Inherits from [`~tokenization_utils.PreTrainedTokenizer`].
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, **kwargs):
# Ensure optional dependency is available before loading
requires_backends(self, "sentencepiece")
# Extract sentencepiece-specific parameters
self.vocab_file = kwargs.get("vocab_file")
self.legacy = kwargs.get("legacy", True)
self.sp_model_kwargs = kwargs.pop("sp_model_kwargs", {})
# Set backend to "sentencepiece" if not already set
if "backend" not in kwargs:
kwargs["backend"] = "sentencepiece"
# Load the SentencePiece model before calling parent __init__
# This is needed because parent __init__ may call methods that depend on sp_model
tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
tokenizer.Load(self.vocab_file)
if not self.legacy:
model_pb2 = import_protobuf()
proto = model_pb2.ModelProto.FromString(tokenizer.serialized_model_proto())
if proto.normalizer_spec.add_dummy_prefix:
proto.normalizer_spec.add_dummy_prefix = False
tokenizer.LoadFromSerializedProto(proto.SerializeToString())
self.sp_model = tokenizer
# Initialize total_vocab_size before parent __init__ (which may call _add_tokens -> len(self))
self.total_vocab_size = self.sp_model.get_piece_size()
# Add sp_model_kwargs back to kwargs so it gets stored in init_kwargs
kwargs["sp_model_kwargs"] = self.sp_model_kwargs
# Call parent class __init__ (PreTrainedTokenizer)
# This handles tokens_trie, _added_tokens_decoder, _added_tokens_encoder,
# token_type_ids_pattern, special_tokens_pattern, and adds special tokens
super().__init__(**kwargs)
self._update_trie()
@property
def vocab_size(self) -> int:
"""Returns vocab size"""
return self.sp_model.get_piece_size()
def get_vocab(self):
"""Returns vocab as a dict"""
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary. Special tokens are sometimes already in the
vocab which is why they have to be handled specifically.
Args:
new_tokens (`list[str]`or `list[tokenizers.AddedToken]`):
Token(s) to add in vocabulary. A token is counted as added if it's not already in the vocabulary
(tested by checking if the tokenizer assign the index of the `unk_token` to them). If a token is part
of the vocabulary then we simply mark this token as an `AddedToken` which allows to control the
stripping and normalization of this token. This is NOT possible in `tokenizers`.
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertModel.from_pretrained("google-bert/bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
if not new_tokens:
return 0
next_index = len(self) # total size (base + added)
num_added = 0
for token in new_tokens:
if not isinstance(token, (str, AddedToken)):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if str(token) == "":
continue
if isinstance(token, str):
if token in self._added_tokens_encoder:
continue
is_special = token in self.all_special_tokens or special_tokens
token = AddedToken(token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special)
elif special_tokens:
# doing token.special=True changes the normalization! will fix in rust
# this is important and the only reason why the AddedTokens in each class are normalized by default
token.__setstate__({"special": True, "normalized": token.normalized})
if token in self._added_tokens_decoder.values():
continue
if not token.special and token.normalized and getattr(self, "do_lower_case", False):
token.content = token.content.lower()
# Check if token already exists in the SentencePiece base vocab
tok_id = self.sp_model.piece_to_id(token.content)
in_base_vocab = (
tok_id < self.sp_model.get_piece_size() and self.sp_model.IdToPiece(tok_id) == token.content
)
if in_base_vocab:
token_index = tok_id
else:
token_index = next_index
next_index += 1
num_added += 1
if token.special and str(token) not in self.all_special_tokens:
self._extra_special_tokens.append(token)
# the setter automatically updates the reverse map
self._added_tokens_decoder[token_index] = token
self._added_tokens_encoder[token.content] = token_index
if self.verbose:
logger.info(f"Adding {token} to the vocabulary")
self._update_trie()
self._update_total_vocab_size()
return num_added
def _update_trie(self, unique_no_split_tokens: Optional[list[str]] = None):
# Add all added tokens
for token in self._added_tokens_decoder.values():
if token.content not in self.tokens_trie._tokens:
self.tokens_trie.add(token.content)
# Also add all special tokens (even if they're in base vocab) so they get split during tokenization
for token in self.all_special_tokens:
if token not in self.tokens_trie._tokens:
self.tokens_trie.add(token)
# Add any additional no-split tokens
for token in unique_no_split_tokens or []:
if token not in self.tokens_trie._tokens:
self.tokens_trie.add(token)
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
return self.sp_model.encode(text, out_type=str)
# 1. Encode string + prefix ex: "<unk> Hey"
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
# 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
unk_token_length = len(self.sp_model.encode(str(self.unk_token)))
return tokens[unk_token_length:] if len(tokens) >= unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) to an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens: list[str]) -> str:
"""Converts a sequence of tokens (string) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
"""
Save the sentencepiece vocabulary (copy original file) to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the named of the saved files.
Returns:
`tuple(str)`: Paths to the files saved.
"""
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def _decode(
self,
token_ids: Union[int, list[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
spaces_between_special_tokens: bool = False,
**kwargs,
) -> str:
"""
Decode token ids to string.
Uses the generic decode path from PreTrainedTokenizer which works for all vocabularies,
including custom vocabularies that override _convert_id_to_token.
"""
# Use parent class's generic decode method - it's simpler and works for all cases
return super()._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
| SentencePieceBackend |
python | mitmproxy__pdoc | pdoc/doc.py | {
"start": 39786,
"end": 45221
} | class ____(Doc[None]):
"""
Representation of a variable's documentation. This includes module, class and instance variables.
"""
kind = "variable"
default_value: (
Any | empty
) # technically Any includes empty, but this conveys intent.
"""
The variable's default value.
In some cases, no default value is known. This may either be because a variable is only defined in the constructor,
or it is only declared with a type annotation without assignment (`foo: int`).
To distinguish this case from a default value of `None`, `pdoc.doc_types.empty` is used as a placeholder.
"""
annotation: type | empty
"""
The variable's type annotation.
If there is no type annotation, `pdoc.doc_types.empty` is used as a placeholder.
"""
def __init__(
self,
modulename: str,
qualname: str,
*,
taken_from: tuple[str, str],
docstring: str,
annotation: type | empty = empty,
default_value: Any | empty = empty,
):
"""
Construct a variable doc object.
While classes and functions can introspect themselves to see their docstring,
variables can't do that as we don't have a "variable object" we could query.
As such, docstring, declaration location, type annotation, and the default value
must be passed manually in the constructor.
"""
super().__init__(modulename, qualname, None, taken_from)
# noinspection PyPropertyAccess
self.docstring = inspect.cleandoc(docstring)
self.annotation = annotation
self.default_value = default_value
@cache
@_include_fullname_in_traceback
def __repr__(self):
if self.default_value_str:
default = f" = {self.default_value_str}"
else:
default = ""
return f"<var {self.qualname.rsplit('.')[-1]}{self.annotation_str}{default}{_docstr(self)}>"
@cached_property
def is_classvar(self) -> bool:
"""`True` if the variable is a class variable, `False` otherwise."""
if get_origin(self.annotation) is ClassVar:
return True
else:
return False
@cached_property
def is_typevar(self) -> bool:
"""`True` if the variable is a `typing.TypeVar`, `False` otherwise."""
if isinstance(self.default_value, TypeVar):
return True
else:
return False
@cached_property
def is_type_alias_type(self) -> bool:
"""`True` if the variable is a `typing.TypeAliasType`, `False` otherwise."""
return isinstance(self.default_value, TypeAliasType)
@cached_property
def is_enum_member(self) -> bool:
"""`True` if the variable is an enum member, `False` otherwise."""
if isinstance(self.default_value, enum.Enum):
return True
else:
return False
@cached_property
def default_value_str(self) -> str:
"""The variable's default value as a pretty-printed str."""
if self.default_value is empty:
return ""
if isinstance(self.default_value, TypeAliasType):
formatted = formatannotation(self.default_value.__value__)
return _remove_collections_abc(formatted)
elif self.annotation == TypeAlias:
formatted = formatannotation(self.default_value)
return _remove_collections_abc(formatted)
# This is not perfect, but a solid attempt at preventing accidental leakage of secrets.
# If you have input on how to improve the heuristic, please send a pull request!
value_taken_from_env_var = (
isinstance(self.default_value, str)
and len(self.default_value) >= 8
and self.default_value in _environ_lookup()
)
if value_taken_from_env_var and not os.environ.get("PDOC_DISPLAY_ENV_VARS", ""):
env_var = "$" + _environ_lookup()[self.default_value]
warnings.warn(
f"The default value of {self.fullname} matches the {env_var} environment variable. "
f"To prevent accidental leakage of secrets, the default value is not displayed. "
f"Disable this behavior by setting PDOC_DISPLAY_ENV_VARS=1 as an environment variable.",
RuntimeWarning,
)
return env_var
try:
pretty = repr(self.default_value)
except Exception as e:
warnings.warn(f"repr({self.fullname}) raised an exception ({e!r})")
return ""
pretty = _remove_memory_addresses(pretty)
return pretty
@cached_property
def annotation_str(self) -> str:
"""The variable's type annotation as a pretty-printed str."""
if self.annotation is not empty:
formatted = formatannotation(self.annotation)
# type aliases don't include the module name in their __repr__, so we add it here.
if isinstance(self.annotation, TypeAliasType):
formatted = f"{self.annotation.__module__}.{formatted}"
return f": {_remove_collections_abc(formatted)}"
else:
return ""
@cache
def _environ_lookup():
"""
A reverse lookup of os.environ. This is a cached function so that it is evaluated lazily.
"""
return {value: key for key, value in os.environ.items()}
| Variable |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/base.py | {
"start": 2778,
"end": 21545
} | class ____(Protocol):
@classmethod
def from_directory(cls, directory: str) -> "BaseDistribution":
"""Load the distribution from a metadata directory.
:param directory: Path to a metadata directory, e.g. ``.dist-info``.
"""
raise NotImplementedError()
@classmethod
def from_metadata_file_contents(
cls,
metadata_contents: bytes,
filename: str,
project_name: str,
) -> "BaseDistribution":
"""Load the distribution from the contents of a METADATA file.
This is used to implement PEP 658 by generating a "shallow" dist object that can
be used for resolution without downloading or building the actual dist yet.
:param metadata_contents: The contents of a METADATA file.
:param filename: File name for the dist with this metadata.
:param project_name: Name of the project this dist represents.
"""
raise NotImplementedError()
@classmethod
def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution":
"""Load the distribution from a given wheel.
:param wheel: A concrete wheel definition.
:param name: File name of the wheel.
:raises InvalidWheel: Whenever loading of the wheel causes a
:py:exc:`zipfile.BadZipFile` exception to be thrown.
:raises UnsupportedWheel: If the wheel is a valid zip, but malformed
internally.
"""
raise NotImplementedError()
def __repr__(self) -> str:
return f"{self.raw_name} {self.raw_version} ({self.location})"
def __str__(self) -> str:
return f"{self.raw_name} {self.raw_version}"
@property
def location(self) -> Optional[str]:
"""Where the distribution is loaded from.
A string value is not necessarily a filesystem path, since distributions
can be loaded from other sources, e.g. arbitrary zip archives. ``None``
means the distribution is created in-memory.
Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
this is a symbolic link, we want to preserve the relative path between
it and files in the distribution.
"""
raise NotImplementedError()
@property
def editable_project_location(self) -> Optional[str]:
"""The project location for editable distributions.
This is the directory where pyproject.toml or setup.py is located.
None if the distribution is not installed in editable mode.
"""
# TODO: this property is relatively costly to compute, memoize it ?
direct_url = self.direct_url
if direct_url:
if direct_url.is_local_editable():
return url_to_path(direct_url.url)
else:
# Search for an .egg-link file by walking sys.path, as it was
# done before by dist_is_editable().
egg_link_path = egg_link_path_from_sys_path(self.raw_name)
if egg_link_path:
# TODO: get project location from second line of egg_link file
# (https://github.com/pypa/pip/issues/10243)
return self.location
return None
@property
def installed_location(self) -> Optional[str]:
"""The distribution's "installed" location.
This should generally be a ``site-packages`` directory. This is
usually ``dist.location``, except for legacy develop-installed packages,
where ``dist.location`` is the source code location, and this is where
the ``.egg-link`` file is.
The returned location is normalized (in particular, with symlinks removed).
"""
raise NotImplementedError()
@property
def info_location(self) -> Optional[str]:
"""Location of the .[egg|dist]-info directory or file.
Similarly to ``location``, a string value is not necessarily a
filesystem path. ``None`` means the distribution is created in-memory.
For a modern .dist-info installation on disk, this should be something
like ``{location}/{raw_name}-{version}.dist-info``.
Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
this is a symbolic link, we want to preserve the relative path between
it and other files in the distribution.
"""
raise NotImplementedError()
@property
def installed_by_distutils(self) -> bool:
"""Whether this distribution is installed with legacy distutils format.
A distribution installed with "raw" distutils not patched by setuptools
uses one single file at ``info_location`` to store metadata. We need to
treat this specially on uninstallation.
"""
info_location = self.info_location
if not info_location:
return False
return pathlib.Path(info_location).is_file()
@property
def installed_as_egg(self) -> bool:
"""Whether this distribution is installed as an egg.
This usually indicates the distribution was installed by (older versions
of) easy_install.
"""
location = self.location
if not location:
return False
# XXX if the distribution is a zipped egg, location has a trailing /
# so we resort to pathlib.Path to check the suffix in a reliable way.
return pathlib.Path(location).suffix == ".egg"
@property
def installed_with_setuptools_egg_info(self) -> bool:
"""Whether this distribution is installed with the ``.egg-info`` format.
This usually indicates the distribution was installed with setuptools
with an old pip version or with ``single-version-externally-managed``.
Note that this ensure the metadata store is a directory. distutils can
also installs an ``.egg-info``, but as a file, not a directory. This
property is *False* for that case. Also see ``installed_by_distutils``.
"""
info_location = self.info_location
if not info_location:
return False
if not info_location.endswith(".egg-info"):
return False
return pathlib.Path(info_location).is_dir()
@property
def installed_with_dist_info(self) -> bool:
"""Whether this distribution is installed with the "modern format".
This indicates a "modern" installation, e.g. storing metadata in the
``.dist-info`` directory. This applies to installations made by
setuptools (but through pip, not directly), or anything using the
standardized build backend interface (PEP 517).
"""
info_location = self.info_location
if not info_location:
return False
if not info_location.endswith(".dist-info"):
return False
return pathlib.Path(info_location).is_dir()
@property
def canonical_name(self) -> NormalizedName:
raise NotImplementedError()
@property
def version(self) -> Version:
raise NotImplementedError()
@property
def raw_version(self) -> str:
raise NotImplementedError()
@property
def setuptools_filename(self) -> str:
"""Convert a project name to its setuptools-compatible filename.
This is a copy of ``pkg_resources.to_filename()`` for compatibility.
"""
return self.raw_name.replace("-", "_")
@property
def direct_url(self) -> Optional[DirectUrl]:
"""Obtain a DirectUrl from this distribution.
Returns None if the distribution has no `direct_url.json` metadata,
or if `direct_url.json` is invalid.
"""
try:
content = self.read_text(DIRECT_URL_METADATA_NAME)
except FileNotFoundError:
return None
try:
return DirectUrl.from_json(content)
except (
UnicodeDecodeError,
json.JSONDecodeError,
DirectUrlValidationError,
) as e:
logger.warning(
"Error parsing %s for %s: %s",
DIRECT_URL_METADATA_NAME,
self.canonical_name,
e,
)
return None
@property
def installer(self) -> str:
try:
installer_text = self.read_text("INSTALLER")
except (OSError, ValueError, NoneMetadataError):
return "" # Fail silently if the installer file cannot be read.
for line in installer_text.splitlines():
cleaned_line = line.strip()
if cleaned_line:
return cleaned_line
return ""
@property
def requested(self) -> bool:
return self.is_file("REQUESTED")
@property
def editable(self) -> bool:
return bool(self.editable_project_location)
@property
def local(self) -> bool:
"""If distribution is installed in the current virtual environment.
Always True if we're not in a virtualenv.
"""
if self.installed_location is None:
return False
return is_local(self.installed_location)
@property
def in_usersite(self) -> bool:
if self.installed_location is None or user_site is None:
return False
return self.installed_location.startswith(normalize_path(user_site))
@property
def in_site_packages(self) -> bool:
if self.installed_location is None or site_packages is None:
return False
return self.installed_location.startswith(normalize_path(site_packages))
def is_file(self, path: InfoPath) -> bool:
"""Check whether an entry in the info directory is a file."""
raise NotImplementedError()
def iter_distutils_script_names(self) -> Iterator[str]:
"""Find distutils 'scripts' entries metadata.
If 'scripts' is supplied in ``setup.py``, distutils records those in the
installed distribution's ``scripts`` directory, a file for each script.
"""
raise NotImplementedError()
def read_text(self, path: InfoPath) -> str:
"""Read a file in the info directory.
:raise FileNotFoundError: If ``path`` does not exist in the directory.
:raise NoneMetadataError: If ``path`` exists in the info directory, but
cannot be read.
"""
raise NotImplementedError()
def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
raise NotImplementedError()
def _metadata_impl(self) -> email.message.Message:
raise NotImplementedError()
@functools.cached_property
def metadata(self) -> email.message.Message:
"""Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
This should return an empty message if the metadata file is unavailable.
:raises NoneMetadataError: If the metadata file is available, but does
not contain valid metadata.
"""
metadata = self._metadata_impl()
self._add_egg_info_requires(metadata)
return metadata
@property
def metadata_dict(self) -> Dict[str, Any]:
"""PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO.
This should return an empty dict if the metadata file is unavailable.
:raises NoneMetadataError: If the metadata file is available, but does
not contain valid metadata.
"""
return msg_to_json(self.metadata)
@property
def metadata_version(self) -> Optional[str]:
"""Value of "Metadata-Version:" in distribution metadata, if available."""
return self.metadata.get("Metadata-Version")
@property
def raw_name(self) -> str:
"""Value of "Name:" in distribution metadata."""
# The metadata should NEVER be missing the Name: key, but if it somehow
# does, fall back to the known canonical name.
return self.metadata.get("Name", self.canonical_name)
@property
def requires_python(self) -> SpecifierSet:
"""Value of "Requires-Python:" in distribution metadata.
If the key does not exist or contains an invalid value, an empty
SpecifierSet should be returned.
"""
value = self.metadata.get("Requires-Python")
if value is None:
return SpecifierSet()
try:
# Convert to str to satisfy the type checker; this can be a Header object.
spec = SpecifierSet(str(value))
except InvalidSpecifier as e:
message = "Package %r has an invalid Requires-Python: %s"
logger.warning(message, self.raw_name, e)
return SpecifierSet()
return spec
def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
"""Dependencies of this distribution.
For modern .dist-info distributions, this is the collection of
"Requires-Dist:" entries in distribution metadata.
"""
raise NotImplementedError()
def iter_raw_dependencies(self) -> Iterable[str]:
"""Raw Requires-Dist metadata."""
return self.metadata.get_all("Requires-Dist", [])
def iter_provided_extras(self) -> Iterable[NormalizedName]:
"""Extras provided by this distribution.
For modern .dist-info distributions, this is the collection of
"Provides-Extra:" entries in distribution metadata.
The return value of this function is expected to be normalised names,
per PEP 685, with the returned value being handled appropriately by
`iter_dependencies`.
"""
raise NotImplementedError()
def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]:
try:
text = self.read_text("RECORD")
except FileNotFoundError:
return None
# This extra Path-str cast normalizes entries.
return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))
def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
try:
text = self.read_text("installed-files.txt")
except FileNotFoundError:
return None
paths = (p for p in text.splitlines(keepends=False) if p)
root = self.location
info = self.info_location
if root is None or info is None:
return paths
try:
info_rel = pathlib.Path(info).relative_to(root)
except ValueError: # info is not relative to root.
return paths
if not info_rel.parts: # info *is* root.
return paths
return (
_convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts)
for p in paths
)
def iter_declared_entries(self) -> Optional[Iterator[str]]:
"""Iterate through file entries declared in this distribution.
For modern .dist-info distributions, this is the files listed in the
``RECORD`` metadata file. For legacy setuptools distributions, this
comes from ``installed-files.txt``, with entries normalized to be
compatible with the format used by ``RECORD``.
:return: An iterator for listed entries, or None if the distribution
contains neither ``RECORD`` nor ``installed-files.txt``.
"""
return (
self._iter_declared_entries_from_record()
or self._iter_declared_entries_from_legacy()
)
def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
"""Parse a ``requires.txt`` in an egg-info directory.
This is an INI-ish format where an egg-info stores dependencies. A
section name describes extra other environment markers, while each entry
is an arbitrary string (not a key-value pair) representing a dependency
as a requirement string (no markers).
There is a construct in ``importlib.metadata`` called ``Sectioned`` that
does mostly the same, but the format is currently considered private.
"""
try:
content = self.read_text("requires.txt")
except FileNotFoundError:
return
extra = marker = "" # Section-less entries don't have markers.
for line in content.splitlines():
line = line.strip()
if not line or line.startswith("#"): # Comment; ignored.
continue
if line.startswith("[") and line.endswith("]"): # A section header.
extra, _, marker = line.strip("[]").partition(":")
continue
yield RequiresEntry(requirement=line, extra=extra, marker=marker)
def _iter_egg_info_extras(self) -> Iterable[str]:
"""Get extras from the egg-info directory."""
known_extras = {""}
for entry in self._iter_requires_txt_entries():
extra = canonicalize_name(entry.extra)
if extra in known_extras:
continue
known_extras.add(extra)
yield extra
def _iter_egg_info_dependencies(self) -> Iterable[str]:
"""Get distribution dependencies from the egg-info directory.
To ease parsing, this converts a legacy dependency entry into a PEP 508
requirement string. Like ``_iter_requires_txt_entries()``, there is code
in ``importlib.metadata`` that does mostly the same, but not do exactly
what we need.
Namely, ``importlib.metadata`` does not normalize the extra name before
putting it into the requirement string, which causes marker comparison
to fail because the dist-info format do normalize. This is consistent in
all currently available PEP 517 backends, although not standardized.
"""
for entry in self._iter_requires_txt_entries():
extra = canonicalize_name(entry.extra)
if extra and entry.marker:
marker = f'({entry.marker}) and extra == "{extra}"'
elif extra:
marker = f'extra == "{extra}"'
elif entry.marker:
marker = entry.marker
else:
marker = ""
if marker:
yield f"{entry.requirement} ; {marker}"
else:
yield entry.requirement
def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
"""Add egg-info requires.txt information to the metadata."""
if not metadata.get_all("Requires-Dist"):
for dep in self._iter_egg_info_dependencies():
metadata["Requires-Dist"] = dep
if not metadata.get_all("Provides-Extra"):
for extra in self._iter_egg_info_extras():
metadata["Provides-Extra"] = extra
| BaseDistribution |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 21709,
"end": 24604
} | class ____(DiagnosticPipError):
"""The current environment is externally managed.
This is raised when the current environment is externally managed, as
defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked
and displayed when the error is bubbled up to the user.
:param error: The error message read from ``EXTERNALLY-MANAGED``.
"""
reference = "externally-managed-environment"
def __init__(self, error: str | None) -> None:
if error is None:
context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR)
else:
context = Text(error)
super().__init__(
message="This environment is externally managed",
context=context,
note_stmt=(
"If you believe this is a mistake, please contact your "
"Python installation or OS distribution provider. "
"You can override this, at the risk of breaking your Python "
"installation or OS, by passing --break-system-packages."
),
hint_stmt=Text("See PEP 668 for the detailed specification."),
)
@staticmethod
def _iter_externally_managed_error_keys() -> Iterator[str]:
# LC_MESSAGES is in POSIX, but not the C standard. The most common
# platform that does not implement this category is Windows, where
# using other categories for console message localization is equally
# unreliable, so we fall back to the locale-less vendor message. This
# can always be re-evaluated when a vendor proposes a new alternative.
try:
category = locale.LC_MESSAGES
except AttributeError:
lang: str | None = None
else:
lang, _ = locale.getlocale(category)
if lang is not None:
yield f"Error-{lang}"
for sep in ("-", "_"):
before, found, _ = lang.partition(sep)
if not found:
continue
yield f"Error-{before}"
yield "Error"
@classmethod
def from_config(
cls,
config: pathlib.Path | str,
) -> ExternallyManagedEnvironment:
parser = configparser.ConfigParser(interpolation=None)
try:
parser.read(config, encoding="utf-8")
section = parser["externally-managed"]
for key in cls._iter_externally_managed_error_keys():
with contextlib.suppress(KeyError):
return cls(section[key])
except KeyError:
pass
except (OSError, UnicodeDecodeError, configparser.ParsingError):
from pip._internal.utils._log import VERBOSE
exc_info = logger.isEnabledFor(VERBOSE)
logger.warning("Failed to read %s", config, exc_info=exc_info)
return cls(None)
| ExternallyManagedEnvironment |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 1026,
"end": 1354
} | class ____(BaseModel):
type: Literal["tool_calls.function.arguments.delta"]
name: str
index: int
arguments: str
"""Accumulated raw JSON string"""
parsed_arguments: object
"""The parsed arguments so far"""
arguments_delta: str
"""The JSON string delta"""
| FunctionToolCallArgumentsDeltaEvent |
python | walkccc__LeetCode | solutions/3203. Find Minimum Diameter After Merging Two Trees/3203.py | {
"start": 0,
"end": 1314
} | class ____:
def minimumDiameterAfterMerge(
self,
edges1: list[list[int]],
edges2: list[list[int]],
) -> int:
diameter1 = self._getDiameter(edges1)
diameter2 = self._getDiameter(edges2)
combinedDiameter = (diameter1 + 1) // 2 + (diameter2 + 1) // 2 + 1
return max(diameter1, diameter2, combinedDiameter)
def _getDiameter(self, edges: list[list[int]]) -> int:
n = len(edges) + 1
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
maxDiameter = [0]
self._maxDepth(graph, 0, -1, maxDiameter)
return maxDiameter[0]
# Similar to 1522. Diameter of N-Ary Tree
def _maxDepth(
self,
graph: list[list[int]],
u: int,
prev: int,
maxDiameter: list[int],
) -> int:
"""Returns the maximum depth of the subtree rooted at u."""
maxSubDepth1 = 0
maxSubDepth2 = 0
for v in graph[u]:
if v == prev:
continue
maxSubDepth = self._maxDepth(graph, v, u, maxDiameter)
if maxSubDepth > maxSubDepth1:
maxSubDepth2 = maxSubDepth1
maxSubDepth1 = maxSubDepth
elif maxSubDepth > maxSubDepth2:
maxSubDepth2 = maxSubDepth
maxDiameter[0] = max(maxDiameter[0], maxSubDepth1 + maxSubDepth2)
return 1 + maxSubDepth1
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 4805,
"end": 4846
} | class ____:
prop1: int = 0
| Concrete15_2 |
python | pallets__quart | src/quart/typing.py | {
"start": 4102,
"end": 4315
} | class ____(Protocol):
def __init__(self, app: Quart, scope: HTTPScope) -> None: ...
async def __call__(
self, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None: ...
| ASGIHTTPProtocol |
python | walkccc__LeetCode | solutions/452. Minimum Number of Arrows to Burst Balloons/452.py | {
"start": 0,
"end": 255
} | class ____:
def findMinArrowShots(self, points: list[list[int]]) -> int:
ans = 0
arrowX = -math.inf
for point in sorted(points, key=lambda x: x[1]):
if point[0] > arrowX:
ans += 1
arrowX = point[1]
return ans
| Solution |
python | dask__distributed | distributed/dashboard/components/worker.py | {
"start": 1856,
"end": 2906
} | class ____(DashboardComponent):
"""Currently running tasks"""
def __init__(self, worker):
self.worker = worker
names = ["Stored", "Executing", "Ready", "Waiting", "Connections", "Serving"]
self.source = ColumnDataSource({name: [] for name in names})
columns = {name: TableColumn(field=name, title=name) for name in names}
table = DataTable(
source=self.source,
columns=[columns[n] for n in names],
height=70,
**_DATATABLE_STYLESHEETS_KWARGS,
)
self.root = table
@without_property_validation
@log_errors
def update(self):
w = self.worker
d = {
"Stored": [len(w.data)],
"Executing": ["%d / %d" % (w.state.executing_count, w.state.nthreads)],
"Ready": [len(w.state.ready)],
"Waiting": [len(w.state.waiting)],
"Connections": [w.state.transfer_incoming_count],
"Serving": [len(w._comms)],
}
update(self.source, d)
| StateTable |
python | great-expectations__great_expectations | great_expectations/compatibility/postgresql.py | {
"start": 2291,
"end": 2590
} | class ____:
"""Namespace for PostgreSQL dialect types."""
TEXT = TEXT
CHAR = CHAR
INTEGER = INTEGER
SMALLINT = SMALLINT
BIGINT = BIGINT
TIMESTAMP = TIMESTAMP
DATE = DATE
DOUBLE_PRECISION = DOUBLE_PRECISION
BOOLEAN = BOOLEAN
NUMERIC = NUMERIC
| POSTGRESQL_TYPES |
python | getsentry__sentry | tests/sentry/integrations/github/test_client.py | {
"start": 38098,
"end": 51926
} | class ____(GitHubClientFileBlameBase):
"""
Tests that get_blame_for_files builds the correct GraphQL query
"""
def setUp(self) -> None:
super().setUp()
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_same_repo(self, get_jwt) -> None:
"""
When all files are in the same repo, only one repository object should be
queried and files blames within the repo should be deduped
"""
file1 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file2 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=15,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file3 = SourceLineInfo(
path="src/sentry/integrations/github/client_2.py",
lineno=20,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
query = """query ($repo_name_0: String!, $repo_owner_0: String!, $ref_0_0: String!, $path_0_0_0: String!, $path_0_0_1: String!) {
repository0: repository(name: $repo_name_0, owner: $repo_owner_0) {
ref0: ref(qualifiedName: $ref_0_0) {
target {
... on Commit {
blame0: blame(path: $path_0_0_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
blame1: blame(path: $path_0_0_1) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
}
}"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"query": query,
"data": {},
},
content_type="application/json",
)
self.github_client.get_blame_for_files([file1, file2, file3], extra={})
assert orjson.loads(responses.calls[1].request.body)["query"] == query
assert orjson.loads(responses.calls[1].request.body)["variables"] == {
"repo_name_0": "foo",
"repo_owner_0": "Test-Organization",
"ref_0_0": "master",
"path_0_0_0": "src/sentry/integrations/github/client_1.py",
"path_0_0_1": "src/sentry/integrations/github/client_2.py",
}
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_different_repos(self, get_jwt) -> None:
"""
When files are in different repos, multiple repository objects should be
queried. Files within the same repo and branch should be deduped.
"""
file1 = SourceLineInfo(
path="src/sentry/integrations/github/client_1.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file2 = SourceLineInfo(
path="src/sentry/integrations/github/client_2.py",
lineno=15,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file3 = SourceLineInfo(
path="src/getsentry/file.py",
lineno=20,
ref="master",
repo=self.repo_2,
code_mapping=None, # type: ignore[arg-type]
)
query = """query ($repo_name_0: String!, $repo_owner_0: String!, $ref_0_0: String!, $path_0_0_0: String!, $path_0_0_1: String!, $repo_name_1: String!, $repo_owner_1: String!, $ref_1_0: String!, $path_1_0_0: String!) {
repository0: repository(name: $repo_name_0, owner: $repo_owner_0) {
ref0: ref(qualifiedName: $ref_0_0) {
target {
... on Commit {
blame0: blame(path: $path_0_0_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
blame1: blame(path: $path_0_0_1) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
}
repository1: repository(name: $repo_name_1, owner: $repo_owner_1) {
ref0: ref(qualifiedName: $ref_1_0) {
target {
... on Commit {
blame0: blame(path: $path_1_0_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
}
}"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"query": query,
"data": {},
},
content_type="application/json",
)
self.github_client.get_blame_for_files([file1, file2, file3], extra={})
assert orjson.loads(responses.calls[1].request.body)["query"] == query
assert orjson.loads(responses.calls[1].request.body)["variables"] == {
"repo_name_0": "foo",
"repo_owner_0": "Test-Organization",
"ref_0_0": "master",
"path_0_0_0": "src/sentry/integrations/github/client_1.py",
"path_0_0_1": "src/sentry/integrations/github/client_2.py",
"repo_name_1": "bar",
"repo_owner_1": "Test-Organization",
"ref_1_0": "master",
"path_1_0_0": "src/getsentry/file.py",
}
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_get_blame_for_files_different_refs(self, get_jwt) -> None:
"""
When files are in the same repo but different branches, query multiple
ref objects. Files should still be deduped.
"""
file1 = SourceLineInfo(
path="src/sentry/integrations/github/client.py",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file2 = SourceLineInfo(
path="src/sentry/integrations/github/client.py",
lineno=15,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
file3 = SourceLineInfo(
path="src/sentry/integrations/github/client.py",
lineno=20,
ref="staging",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
query = """query ($repo_name_0: String!, $repo_owner_0: String!, $ref_0_0: String!, $path_0_0_0: String!, $ref_0_1: String!, $path_0_1_0: String!) {
repository0: repository(name: $repo_name_0, owner: $repo_owner_0) {
ref0: ref(qualifiedName: $ref_0_0) {
target {
... on Commit {
blame0: blame(path: $path_0_0_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
ref1: ref(qualifiedName: $ref_0_1) {
target {
... on Commit {
blame0: blame(path: $path_0_1_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
}
}"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"query": query,
"data": {},
},
content_type="application/json",
)
self.github_client.get_blame_for_files([file1, file2, file3], extra={})
assert orjson.loads(responses.calls[1].request.body)["query"] == query
assert orjson.loads(responses.calls[1].request.body)["variables"] == {
"repo_name_0": "foo",
"repo_owner_0": "Test-Organization",
"ref_0_0": "master",
"path_0_0_0": "src/sentry/integrations/github/client.py",
"ref_0_1": "staging",
"path_0_1_0": "src/sentry/integrations/github/client.py",
}
@mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_trim_file_path_for_query(self, get_jwt) -> None:
"""
When file path has hanging forward slashes, trims them for the request.
The GitHub GraphQL API will return empty responses otherwise.
"""
file1 = SourceLineInfo(
path="/src/sentry/integrations/github/client.py/",
lineno=10,
ref="master",
repo=self.repo_1,
code_mapping=None, # type: ignore[arg-type]
)
query = """query ($repo_name_0: String!, $repo_owner_0: String!, $ref_0_0: String!, $path_0_0_0: String!) {
repository0: repository(name: $repo_name_0, owner: $repo_owner_0) {
ref0: ref(qualifiedName: $ref_0_0) {
target {
... on Commit {
blame0: blame(path: $path_0_0_0) {
ranges {
commit {
oid
author {
name
email
}
message
committedDate
}
startingLine
endingLine
age
}
}
}
}
}
}
}"""
responses.add(
method=responses.POST,
url="https://api.github.com/graphql",
json={
"query": query,
"data": {},
},
content_type="application/json",
)
self.github_client.get_blame_for_files([file1], extra={})
assert orjson.loads(responses.calls[1].request.body)["query"] == query
assert orjson.loads(responses.calls[1].request.body)["variables"] == {
"repo_name_0": "foo",
"repo_owner_0": "Test-Organization",
"ref_0_0": "master",
"path_0_0_0": "src/sentry/integrations/github/client.py",
}
| GitHubClientFileBlameQueryBuilderTest |
python | tensorflow__tensorflow | tensorflow/python/eager/device_placement_test.py | {
"start": 8217,
"end": 10210
} | class ____(test.TestCase):
def setUp(self):
super(ClusterPlacementTest, self).setUp()
context._reset_context()
config.set_soft_device_placement(enabled=True)
context.context().log_device_placement = True
workers, _ = test_util.create_local_cluster(2, 0)
remote.connect_to_remote_host([workers[0].target, workers[1].target])
@test_util.disable_tfrt('remote host not supported yet.')
def testNotFullySpecifiedTask(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.device('/job:worker'):
c = a + b
self.assertIn('/job:worker/replica:0/task:0', c.device)
@test_util.disable_tfrt('remote host not supported yet.')
def testRemoteUnknownDevice(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
# Right now we don't support soft device place on remote worker.
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/device:GPU:42'):
c = a + b
del c
self.assertIn('unknown device', cm.exception.message)
@test_util.disable_tfrt('remote host not supported yet.')
def testUnknownDeviceInFunctionReturnUnknowDevice(self):
@def_function.function
def f():
with ops.device('GPU:42'):
return constant_op.constant(1) + constant_op.constant(2)
gpus = config.list_physical_devices('GPU')
if not gpus:
self.assertIn('CPU:0', f().device)
else:
self.assertIn('GPU:0', f().device)
@test_util.disable_tfrt('remote host not supported yet.')
def testUnknownDeviceInFunction(self):
@def_function.function
def f():
with ops.device('GPU:42'):
a = constant_op.constant(1) + constant_op.constant(2)
return a + constant_op.constant(2)
gpus = config.list_physical_devices('GPU')
if not gpus:
self.assertIn('CPU:0', f().device)
else:
self.assertIn('GPU:0', f().device)
if __name__ == '__main__':
test.main()
| ClusterPlacementTest |
python | fastai__fastai | fastai/collab.py | {
"start": 1758,
"end": 4258
} | class ____(Module):
"Base dot model for collaborative filtering."
def __init__(self, n_factors, n_users, n_items, y_range=None):
self.y_range = y_range
(self.u_weight, self.i_weight, self.u_bias, self.i_bias) = [Embedding(*o) for o in [
(n_users, n_factors), (n_items, n_factors), (n_users,1), (n_items,1)
]]
def forward(self, x):
users,items = x[:,0],x[:,1]
dot = self.u_weight(users)* self.i_weight(items)
res = dot.sum(1) + self.u_bias(users).squeeze() + self.i_bias(items).squeeze()
if self.y_range is None: return res
return torch.sigmoid(res) * (self.y_range[1]-self.y_range[0]) + self.y_range[0]
@classmethod
def from_classes(cls, n_factors, classes, user=None, item=None, y_range=None):
"Build a model with `n_factors` by inferring `n_users` and `n_items` from `classes`"
if user is None: user = list(classes.keys())[0]
if item is None: item = list(classes.keys())[1]
res = cls(n_factors, len(classes[user]), len(classes[item]), y_range=y_range)
res.classes,res.user,res.item = classes,user,item
return res
def _get_idx(self, arr, is_item=True):
"Fetch item or user (based on `is_item`) for all in `arr`"
assert hasattr(self, 'classes'), "Build your model with `EmbeddingDotBias.from_classes` to use this functionality."
classes = self.classes[self.item] if is_item else self.classes[self.user]
c2i = {v:k for k,v in enumerate(classes)}
try: return tensor([c2i[o] for o in arr])
except KeyError as e:
message = f"You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data. If it was in your original data, it may have been split such that it's only in the validation set now."
raise modify_exception(e, message, replace=True)
def bias(self, arr, is_item=True):
"Bias for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_bias if is_item else self.u_bias).eval().cpu()
return to_detach(layer(idx).squeeze(),gather=False)
def weight(self, arr, is_item=True):
"Weight for item or user (based on `is_item`) for all in `arr`"
idx = self._get_idx(arr, is_item)
layer = (self.i_weight if is_item else self.u_weight).eval().cpu()
return to_detach(layer(idx),gather=False)
# %% ../nbs/45_collab.ipynb 34
| EmbeddingDotBias |
python | ethereum__web3.py | ens/exceptions.py | {
"start": 2344,
"end": 2442
} | class ____(ENSException):
"""
Raised if there is a validation error
"""
| ENSValidationError |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 6659,
"end": 6999
} | class ____(CtrlNode):
"""Removes linear trend from the data"""
nodeName = 'DetrendFilter'
def processData(self, data):
try:
from scipy.signal import detrend
except ImportError:
raise Exception("DetrendFilter node requires the package scipy.signal.")
return detrend(data)
| Detrend |
python | pydata__xarray | doc/examples/_code/accessor_example.py | {
"start": 59,
"end": 703
} | class ____:
def __init__(self, xarray_obj):
self._obj = xarray_obj
self._center = None
@property
def center(self):
"""Return the geographic center point of this dataset."""
if self._center is None:
# we can use a cache on our accessor objects, because accessors
# themselves are cached on instances that access them.
lon = self._obj.latitude
lat = self._obj.longitude
self._center = (float(lon.mean()), float(lat.mean()))
return self._center
def plot(self):
"""Plot data on a map."""
return "plotting!"
| GeoAccessor |
python | squidfunk__mkdocs-material | material/plugins/blog/structure/__init__.py | {
"start": 6580,
"end": 10158
} | class ____(Page):
# Initialize an excerpt for the given post - we create the Markdown parser
# when intitializing the excerpt in order to improve rendering performance
# for excerpts, as they are reused across several different views, because
# posts might be referenced from multiple different locations
def __init__(self, post: Post, config: MkDocsConfig, files: Files):
self.file = copy(post.file)
self.post = post
# Set canonical URL, or we can't print excerpts when debugging the
# blog plugin, as the `abs_url` property would be missing
self._set_canonical_url(config.site_url)
# Initialize configuration and metadata
self.config = post.config
self.meta = post.meta
# Initialize authors and categories - note that views usually contain
# subsets of those lists, which is why we need to manage them here
self.authors: list[Author] = []
self.categories: list[Category] = []
# Initialize content after separator - allow template authors to render
# posts inline or to provide a link to the post's page
self.more = None
# Initialize parser - note that we need to patch the configuration,
# more specifically the table of contents extension
config = _patch(config)
self.md = Markdown(
extensions = config.markdown_extensions,
extension_configs = config.mdx_configs,
)
# Register excerpt tree processor - this processor resolves anchors to
# posts from within views, so they point to the correct location
self.md.treeprocessors.register(
ExcerptTreeprocessor(post),
"excerpt",
0
)
# Register relative path tree processor - this processor resolves links
# to other pages and assets, and is used by MkDocs itself
self.md.treeprocessors.register(
_RelativePathTreeprocessor(self.file, files, config),
"relpath",
1
)
# Render an excerpt of the post on the given page - note that this is not
# thread-safe because excerpts are shared across views, as it cuts down on
# the cost of initialization. However, if in the future, we decide to render
# posts and views concurrently, we must change this behavior.
def render(self, page: Page, separator: str):
self.file.url = page.url
# Retrieve excerpt tree processor and set page as base
at = self.md.treeprocessors.get_index_for_name("excerpt")
processor: ExcerptTreeprocessor = self.md.treeprocessors[at]
processor.base = page
# Ensure that the excerpt includes a title in its content, since the
# title is linked to the post when rendering - see https://t.ly/5Gg2F
self.markdown = self.post.markdown
if not self.post._title_from_render:
self.markdown = "\n\n".join([f"# {self.post.title}", self.markdown])
# Convert Markdown to HTML and extract excerpt
self.content = self.md.convert(self.markdown)
self.content, *more = self.content.split(separator, 1)
if more:
self.more = more[0]
# Extract table of contents and reset post URL - if we wouldn't reset
# the excerpt URL, linking to the excerpt from the view would not work
self.toc = get_toc(getattr(self.md, "toc_tokens", []))
self.file.url = self.post.url
# -----------------------------------------------------------------------------
# View
| Excerpt |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 16391,
"end": 16595
} | class ____:
"""Test zh_CN bank provider"""
def test_bank(self, faker, num_samples):
for _ in range(num_samples):
assert re.match(r"[\u4e00-\u9fa5]{2,20}", faker.bank())
| TestZhCn |
python | walkccc__LeetCode | solutions/2130. Maximum Twin Sum of a Linked List/2130.py | {
"start": 0,
"end": 651
} | class ____:
def pairSum(self, head: ListNode | None) -> int:
def reverseList(head: ListNode) -> ListNode:
prev = None
while head:
next = head.next
head.next = prev
prev = head
head = next
return prev
ans = 0
slow = head
fast = head
# `slow` points to the start of the second half.
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# `tail` points to the end of the reversed second half.
tail = reverseList(slow)
while tail:
ans = max(ans, head.val + tail.val)
head = head.next
tail = tail.next
return ans
| Solution |
python | pydata__xarray | xarray/core/extension_array.py | {
"start": 2635,
"end": 7269
} | class ____(NDArrayMixin, Generic[T_ExtensionArray]):
"""NEP-18 compliant wrapper for pandas extension arrays.
Parameters
----------
array : T_ExtensionArray
The array to be wrapped upon e.g,. :py:class:`xarray.Variable` creation.
```
"""
array: T_ExtensionArray
def __post_init__(self):
if not isinstance(self.array, pd.api.extensions.ExtensionArray):
raise TypeError(f"{self.array} is not an pandas ExtensionArray.")
# This does not use the UNSUPPORTED_EXTENSION_ARRAY_TYPES whitelist because
# we do support extension arrays from datetime, for example, that need
# duck array support internally via this class. These can appear from `DatetimeIndex`
# wrapped by `PandasIndex` internally, for example.
if not is_allowed_extension_array(self.array):
raise TypeError(
f"{self.array.dtype!r} should be converted to a numpy array in `xarray` internally."
)
def __array_function__(self, func, types, args, kwargs):
def replace_duck_with_extension_array(args) -> list:
args_as_list = list(args)
for index, value in enumerate(args_as_list):
if isinstance(value, PandasExtensionArray):
args_as_list[index] = value.array
elif isinstance(
value, tuple
): # should handle more than just tuple? iterable?
args_as_list[index] = tuple(
replace_duck_with_extension_array(value)
)
elif isinstance(value, list):
args_as_list[index] = replace_duck_with_extension_array(value)
return args_as_list
args = tuple(replace_duck_with_extension_array(args))
if func not in HANDLED_EXTENSION_ARRAY_FUNCTIONS:
raise KeyError("Function not registered for pandas extension arrays.")
res = HANDLED_EXTENSION_ARRAY_FUNCTIONS[func](*args, **kwargs)
if is_allowed_extension_array(res):
return PandasExtensionArray(res)
return res
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return ufunc(*inputs, **kwargs)
def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]:
item = self.array[key]
if is_allowed_extension_array(item):
return PandasExtensionArray(item)
if np.isscalar(item) or isinstance(key, int):
return PandasExtensionArray(type(self.array)._from_sequence([item])) # type: ignore[call-arg,attr-defined,unused-ignore]
return PandasExtensionArray(item)
def __setitem__(self, key, val):
self.array[key] = val
def __eq__(self, other):
if isinstance(other, PandasExtensionArray):
return self.array == other.array
return self.array == other
def __ne__(self, other):
return ~(self == other)
def __len__(self):
return len(self.array)
@property
def ndim(self) -> int:
return 1
def __array__(
self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
) -> np.ndarray:
if Version(np.__version__) >= Version("2.0.0"):
return np.asarray(self.array, dtype=dtype, copy=copy)
else:
return np.asarray(self.array, dtype=dtype)
def __getattr__(self, attr: str) -> Any:
# with __deepcopy__ or __copy__, the object is first constructed and then the sub-objects are attached (see https://docs.python.org/3/library/copy.html)
# Thus, if we didn't have `super().__getattribute__("array")` this method would call `self.array` (i.e., `getattr(self, "array")`) again while looking for `__setstate__`
# (which is apparently the first thing sought in copy.copy from the under-construction copied object),
# which would cause a recursion error since `array` is not present on the object when it is being constructed during `__{deep}copy__`.
# Even though we have defined these two methods now below due to `test_extension_array_copy_arrow_type` (cause unknown)
# we leave this here as it more robust than self.array
return getattr(super().__getattribute__("array"), attr)
def __copy__(self) -> PandasExtensionArray[T_ExtensionArray]:
return PandasExtensionArray(copy.copy(self.array))
def __deepcopy__(
self, memo: dict[int, Any] | None = None
) -> PandasExtensionArray[T_ExtensionArray]:
return PandasExtensionArray(copy.deepcopy(self.array, memo=memo))
| PandasExtensionArray |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 22175,
"end": 25086
} | class ____:
def test_setup_fixture_error(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def setup_function(function):
print("setup func")
assert 0
def test_nada():
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at setup of test_nada*",
"*setup_function(function):*",
"*setup func*",
"*assert 0*",
"*1 error*",
]
)
assert result.ret != 0
def test_teardown_fixture_error(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_nada():
pass
def teardown_function(function):
print("teardown func")
assert 0
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown*",
"*teardown_function(function):*",
"*assert 0*",
"*Captured stdout*",
"*teardown func*",
"*1 passed*1 error*",
]
)
def test_teardown_fixture_error_and_test_failure(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print("teardown func")
assert False
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*ERROR at teardown of test_fail*",
"*teardown_function(function):*",
"*assert False*",
"*Captured stdout*",
"*teardown func*",
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*1 failed*1 error*",
]
)
def test_setup_teardown_output_and_test_failure(self, pytester: Pytester) -> None:
"""Test for issue #442."""
pytester.makepyfile(
"""
def setup_function(function):
print("setup func")
def test_fail():
assert 0, "failingfunc"
def teardown_function(function):
print("teardown func")
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*test_fail*",
"*def test_fail():",
"*failingfunc*",
"*Captured stdout setup*",
"*setup func*",
"*Captured stdout teardown*",
"*teardown func*",
"*1 failed*",
]
)
| TestFixtureReporting |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_beta_messages.py | {
"start": 10321,
"end": 20395
} | class ____(Generic[ResponseFormatT]):
"""Wrapper over BetaAsyncMessageStream that is returned by `.stream()`
so that an async context manager can be used without `await`ing the
original client call.
```py
async with client.beta.messages.stream(...) as stream:
async for chunk in stream:
...
```
"""
def __init__(
self,
api_request: Awaitable[AsyncStream[BetaRawMessageStreamEvent]],
*,
output_format: ResponseFormatT | NotGiven = NOT_GIVEN,
) -> None:
self.__stream: BetaAsyncMessageStream[ResponseFormatT] | None = None
self.__api_request = api_request
self.__output_format = output_format
async def __aenter__(self) -> BetaAsyncMessageStream[ResponseFormatT]:
raw_stream = await self.__api_request
self.__stream = BetaAsyncMessageStream(raw_stream, output_format=self.__output_format)
return self.__stream
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
await self.__stream.close()
def build_events(
*,
event: BetaRawMessageStreamEvent,
message_snapshot: ParsedBetaMessage[ResponseFormatT],
) -> list[ParsedBetaMessageStreamEvent[ResponseFormatT]]:
events_to_fire: list[ParsedBetaMessageStreamEvent[ResponseFormatT]] = []
if event.type == "message_start":
events_to_fire.append(event)
elif event.type == "message_delta":
events_to_fire.append(event)
elif event.type == "message_stop":
events_to_fire.append(
build(ParsedBetaMessageStopEvent[ResponseFormatT], type="message_stop", message=message_snapshot)
)
elif event.type == "content_block_start":
events_to_fire.append(event)
elif event.type == "content_block_delta":
events_to_fire.append(event)
content_block = message_snapshot.content[event.index]
if event.delta.type == "text_delta":
if content_block.type == "text":
events_to_fire.append(
build(
ParsedBetaTextEvent,
type="text",
text=event.delta.text,
snapshot=content_block.text,
)
)
elif event.delta.type == "input_json_delta":
if content_block.type == "tool_use" or content_block.type == "mcp_tool_use":
events_to_fire.append(
build(
BetaInputJsonEvent,
type="input_json",
partial_json=event.delta.partial_json,
snapshot=content_block.input,
)
)
elif event.delta.type == "citations_delta":
if content_block.type == "text":
events_to_fire.append(
build(
BetaCitationEvent,
type="citation",
citation=event.delta.citation,
snapshot=content_block.citations or [],
)
)
elif event.delta.type == "thinking_delta":
if content_block.type == "thinking":
events_to_fire.append(
build(
BetaThinkingEvent,
type="thinking",
thinking=event.delta.thinking,
snapshot=content_block.thinking,
)
)
elif event.delta.type == "signature_delta":
if content_block.type == "thinking":
events_to_fire.append(
build(
BetaSignatureEvent,
type="signature",
signature=content_block.signature,
)
)
pass
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event.delta)
elif event.type == "content_block_stop":
content_block = message_snapshot.content[event.index]
event_to_fire = build(
ParsedBetaContentBlockStopEvent,
type="content_block_stop",
index=event.index,
content_block=content_block,
)
events_to_fire.append(event_to_fire)
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event)
return events_to_fire
JSON_BUF_PROPERTY = "__json_buf"
TRACKS_TOOL_INPUT = (
BetaToolUseBlock,
BetaServerToolUseBlock,
BetaMCPToolUseBlock,
)
def accumulate_event(
*,
event: BetaRawMessageStreamEvent,
current_snapshot: ParsedBetaMessage[ResponseFormatT] | None,
request_headers: httpx.Headers,
output_format: ResponseFormatT | NotGiven = NOT_GIVEN,
) -> ParsedBetaMessage[ResponseFormatT]:
if not isinstance(cast(Any, event), BaseModel):
event = cast( # pyright: ignore[reportUnnecessaryCast]
BetaRawMessageStreamEvent,
construct_type_unchecked(
type_=cast(Type[BetaRawMessageStreamEvent], BetaRawMessageStreamEvent),
value=event,
),
)
if not isinstance(cast(Any, event), BaseModel):
raise TypeError(
f"Unexpected event runtime type, after deserialising twice - {event} - {builtins.type(event)}"
)
if current_snapshot is None:
if event.type == "message_start":
return cast(
ParsedBetaMessage[ResponseFormatT], ParsedBetaMessage.construct(**cast(Any, event.message.to_dict()))
)
raise RuntimeError(f'Unexpected event order, got {event.type} before "message_start"')
if event.type == "content_block_start":
# TODO: check index
current_snapshot.content.append(
cast(
Any, # Pydantic does not support generic unions at runtime
construct_type(type_=ParsedBetaContentBlock, value=event.content_block.to_dict()),
),
)
elif event.type == "content_block_delta":
content = current_snapshot.content[event.index]
if event.delta.type == "text_delta":
if content.type == "text":
content.text += event.delta.text
elif event.delta.type == "input_json_delta":
if isinstance(content, TRACKS_TOOL_INPUT):
from jiter import from_json
# we need to keep track of the raw JSON string as well so that we can
# re-parse it for each delta, for now we just store it as an untyped
# property on the snapshot
json_buf = cast(bytes, getattr(content, JSON_BUF_PROPERTY, b""))
json_buf += bytes(event.delta.partial_json, "utf-8")
if json_buf:
try:
anthropic_beta = request_headers.get("anthropic-beta", "") if request_headers else ""
if "fine-grained-tool-streaming-2025-05-14" in anthropic_beta:
content.input = from_json(json_buf, partial_mode="trailing-strings")
else:
content.input = from_json(json_buf, partial_mode=True)
except ValueError as e:
raise ValueError(
f"Unable to parse tool parameter JSON from model. Please retry your request or adjust your prompt. Error: {e}. JSON: {json_buf.decode('utf-8')}"
) from e
setattr(content, JSON_BUF_PROPERTY, json_buf)
elif event.delta.type == "citations_delta":
if content.type == "text":
if not content.citations:
content.citations = [event.delta.citation]
else:
content.citations.append(event.delta.citation)
elif event.delta.type == "thinking_delta":
if content.type == "thinking":
content.thinking += event.delta.thinking
elif event.delta.type == "signature_delta":
if content.type == "thinking":
content.signature = event.delta.signature
else:
# we only want exhaustive checking for linters, not at runtime
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(event.delta)
elif event.type == "content_block_stop":
content_block = current_snapshot.content[event.index]
if content_block.type == "text" and is_given(output_format):
content_block.parsed_output = parse_text(content_block.text, output_format)
elif event.type == "message_delta":
current_snapshot.container = event.delta.container
current_snapshot.stop_reason = event.delta.stop_reason
current_snapshot.stop_sequence = event.delta.stop_sequence
current_snapshot.usage.output_tokens = event.usage.output_tokens
current_snapshot.context_management = event.context_management
# Update other usage fields if they exist in the event
if event.usage.input_tokens is not None:
current_snapshot.usage.input_tokens = event.usage.input_tokens
if event.usage.cache_creation_input_tokens is not None:
current_snapshot.usage.cache_creation_input_tokens = event.usage.cache_creation_input_tokens
if event.usage.cache_read_input_tokens is not None:
current_snapshot.usage.cache_read_input_tokens = event.usage.cache_read_input_tokens
if event.usage.server_tool_use is not None:
current_snapshot.usage.server_tool_use = event.usage.server_tool_use
return current_snapshot
| BetaAsyncMessageStreamManager |
python | astropy__astropy | astropy/modeling/core.py | {
"start": 19271,
"end": 114466
} | class ____(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
0.0`` is a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ("eqcons", "ineqcons")
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
# Initialize the cache for the constraints (used primarily when
# sync_constraints is False)
self._constraints_cache = {}
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `~astropy.modeling.custom_model`.
"""
if hasattr(self, "_settable_properties"):
setters = {
name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()
}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(
f"Expected {self.n_inputs} number of inputs, got {len(val)}."
)
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(
f"Expected {self.n_outputs} number of outputs, got {len(val)}."
)
self._outputs = val
@property
def n_inputs(self) -> int:
"""The number of inputs."""
return len(getattr(self, "inputs", ()))
@property
def n_outputs(self) -> int:
"""The number of outputs."""
return len(getattr(self, "outputs", ()))
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = dict.fromkeys(
self.inputs, self._input_units_strict
)
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = dict.fromkeys(
self.inputs, self._input_units_allow_dimensionless
)
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return dict.fromkeys(self.inputs, val)
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return dict.fromkeys(self.inputs, val)
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]["shape"]
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]["size"]
if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones(
eshape
):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\nexpected"
f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})"
)
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(
f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity"
)
param._unit = value.unit
param.value = value.value
else:
if attr in ["fittable", "linear"]:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation.
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=self._has_units)
def evaluate(_inputs):
return self.evaluate(*_inputs, *parameters)
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(
with_bbox, bool
):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation."""
return self.inputs
def _validate_input_shape(
self, _input, idx, argnames, model_set_axis, check_model_set_axis
):
"""Perform basic validation of a single model input's shape.
The shape has the minimum dimensions for the given model_set_axis.
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional."
)
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct dimensions"
f" in model_set_axis={model_set_axis} for a model set with"
f" n_models={self._n_models}."
)
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(
self._validate_input_shape(
_input, idx, argnames, model_set_axis, check_model_set_axis
)
)
# If we only have one input we don't need to broadcast it
if len(all_shapes) == 1:
return all_shapes[0]
try:
return np.broadcast_shapes(*all_shapes)
except ValueError as exc:
exc.add_note("All inputs must have identical shapes or must be scalars.")
raise exc
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation."""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""Generic model evaluation routine.
Selects and evaluates model with or without bounding_box enforcement.
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs.
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return not isinstance(self, CompoundModel)
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop("with_bounding_box", False)
fill_value = kwargs.pop("fill_value", np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(
*args, **kwargs
)
outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(
inputs, outputs, broadcasted_shapes, with_bbox, **kwargs
)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
    """
    Convert keyword-passed inputs into positional arguments ordered
    according to ``self.inputs``, validating the total input count.

    Returns the positional argument list and the remaining (non-input)
    keyword arguments.
    """
    def _keyword2positional(kwargs):
        # Inputs were passed as keyword (not positional) arguments.
        # Because the signature of the ``__call__`` is defined at
        # the class level, the name of the inputs cannot be changed at
        # the instance level and the old names are always present in the
        # signature of the method. In order to use the new names of the
        # inputs, the old names are taken out of ``kwargs``, the input
        # values are sorted in the order of self.inputs and passed as
        # positional arguments to ``__call__``.

        # These are the keys that are always present as keyword arguments.
        keys = [
            "model_set_axis",
            "with_bounding_box",
            "fill_value",
            "equivalencies",
            "inputs_map",
        ]

        new_inputs = {}
        # kwargs contain the names of the new inputs + ``keys``
        allkeys = list(kwargs.keys())
        # Remove the names of the new inputs from kwargs and save them
        # to a dict ``new_inputs``.
        for key in allkeys:
            if key not in keys:
                new_inputs[key] = kwargs[key]
                del kwargs[key]
        return new_inputs, kwargs

    n_args = len(args)

    new_inputs, kwargs = _keyword2positional(kwargs)
    n_all_args = n_args + len(new_inputs)

    if n_all_args < self.n_inputs:
        raise ValueError(
            f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}"
        )
    elif n_all_args > self.n_inputs:
        raise ValueError(
            f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}"
        )
    if n_args == 0:
        # Create positional arguments from the keyword arguments in ``new_inputs``.
        new_args = []
        for k in self.inputs:
            new_args.append(new_inputs[k])
    elif n_args != self.n_inputs:
        # Some inputs are passed as positional, others as keyword arguments.
        args = list(args)

        # Create positional arguments from the keyword arguments in ``new_inputs``.
        # Positional values are consumed left-to-right for inputs that were
        # not supplied by keyword.
        new_args = []
        for k in self.inputs:
            if k in new_inputs:
                new_args.append(new_inputs[k])
            else:
                new_args.append(args[0])
                del args[0]
    else:
        # All inputs were positional already.
        new_args = args

    return new_args, kwargs
# *** Properties ***
@property
def name(self):
    """Return the user-provided name of this model instance."""
    model_name = self._name
    return model_name
@name.setter
def name(self, val):
    """Set a (new) name for this model instance."""
    self._name = val
@property
def model_set_axis(self):
    """
    The index of the model set axis, i.e. the axis of a parameter array
    that indicates which model in the set a parameter value belongs to,
    as specified when the model was initialized.

    See the documentation on :ref:`astropy:modeling-model-sets`
    for more details.
    """
    return self._model_set_axis
@property
def param_sets(self):
    """
    Return the parameters as a pset: a list with one item per parameter
    set, each an array of that parameter's values across all parameter
    sets, with the last axis associated with the parameter set.
    """
    psets = self._param_sets()
    return psets
@property
def parameters(self):
    """
    A flattened array of all parameter values in all parameter sets.

    Fittable parameters maintain this list and fitters modify it.
    """
    # Currently the sequence of a model's parameters must be contiguous
    # within the _parameters array (which may be a view of a larger array,
    # for example when taking a sub-expression of a compound model), so
    # the assumption here is reliable:
    if not self.param_names:
        # Trivial, but not unheard of
        return self._parameters
    # Sync the flat backing array from the Parameter objects before
    # slicing out this model's contiguous range.
    self._parameters_to_array()
    start = self._param_metrics[self.param_names[0]]["slice"].start
    stop = self._param_metrics[self.param_names[-1]]["slice"].stop
    return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
    """
    Assigning to this attribute updates the parameters array rather than
    replacing it.

    Raises
    ------
    InputParameterError
        If ``value`` cannot be broadcast into the model's flattened
        parameter array.
    """
    if not self.param_names:
        # Parameterless model: nothing to assign.
        return

    start = self._param_metrics[self.param_names[0]]["slice"].start
    stop = self._param_metrics[self.param_names[-1]]["slice"].stop

    try:
        value = np.asanyarray(value).ravel()
        self._parameters[start:stop] = value
    except ValueError as e:
        # Chain the original error so the underlying shape mismatch is
        # preserved in the traceback (previously the cause was dropped).
        raise InputParameterError(
            "Input parameter values not compatible with the model "
            f"parameters array: {e!r}"
        ) from e
    # Push the updated flat array back into the Parameter objects.
    self._array_to_parameters()
@property
def sync_constraints(self):
    """
    Boolean flag controlling whether accessing the constraint properties
    re-reads the constituent models' current values. It defaults to `True`
    when a model is created, but should be set to `False` during fitting
    for performance reasons.
    """
    try:
        return self._sync_constraints
    except AttributeError:
        # Lazily initialize to the default on first access.
        self._sync_constraints = True
        return True
@sync_constraints.setter
def sync_constraints(self, value):
    # Validate: only a genuine bool may be assigned.
    if not isinstance(value, bool):
        raise ValueError("sync_constraints only accepts True or False as values")
    self._sync_constraints = value
    # We need to invalidate the cache whenever sync_constraints is changed.
    # If we are setting sync_constraints to True, then this will ensure
    # that we recompute the properties next time they are called, and if
    # setting to False, it will allow us to make sure the cache is up-to-date
    # below before disabling syncing.
    self._constraints_cache.clear()
    # If setting to False, cache all the values with the present state
    # to make sure we don't ever update the cache once the syncing is
    # disabled. Note that these will automatically then cause 'fixed',
    # 'bounds' and 'tied' to be called.
    if not value:
        _ = self.has_fixed
        _ = self.has_bounds
        _ = self.has_tied
@property
def fixed(self):
    """A ``dict`` mapping parameter names to their fixed constraint."""
    cache = self._constraints_cache
    if "fixed" not in cache or self.sync_constraints:
        cache["fixed"] = _ConstraintsDict(self, "fixed")
    return cache["fixed"]
@property
def bounds(self):
    """
    A ``dict`` mapping parameter names to their upper and lower bounds as
    ``(min, max)`` tuples or ``[min, max]`` lists.
    """
    cache = self._constraints_cache
    if "bounds" not in cache or self.sync_constraints:
        cache["bounds"] = _ConstraintsDict(self, "bounds")
    return cache["bounds"]
@property
def tied(self):
    """A ``dict`` mapping parameter names to their tied constraint."""
    cache = self._constraints_cache
    if "tied" not in cache or self.sync_constraints:
        cache["tied"] = _ConstraintsDict(self, "tied")
    return cache["tied"]
@property
def has_fixed(self):
    """Whether the model has any fixed constraints."""
    cache = self._constraints_cache
    if "has_fixed" not in cache or self.sync_constraints:
        cache["has_fixed"] = any(self.fixed.values())
    return cache["has_fixed"]
@property
def has_bounds(self):
    """Whether the model has any bounds constraints."""
    cache = self._constraints_cache
    if "has_bounds" not in cache or self.sync_constraints:
        cache["has_bounds"] = any(
            bound != (None, None) for bound in self.bounds.values()
        )
    return cache["has_bounds"]
@property
def has_tied(self):
    """Whether the model has any tied constraints."""
    cache = self._constraints_cache
    if "has_tied" not in cache or self.sync_constraints:
        cache["has_tied"] = any(self.tied.values())
    return cache["has_tied"]
@property
def eqcons(self):
    """List of parameter equality constraints."""
    constraints = self._mconstraints
    return constraints["eqcons"]
@property
def ineqcons(self):
    """List of parameter inequality constraints."""
    constraints = self._mconstraints
    return constraints["ineqcons"]
def has_inverse(self):
    """
    Return `True` if the model has an analytic or user-supplied inverse
    defined; `False` otherwise.
    """
    try:
        self.inverse  # noqa: B018
    except NotImplementedError:
        return False
    else:
        return True
@property
def inverse(self):
    """
    Returns a new `~astropy.modeling.Model` instance which performs the
    inverse transform, if an analytic inverse is defined for this model.

    Even on models that don't have an inverse defined, this property can be
    set with a manually-defined inverse, such a pre-computed or
    experimentally determined inverse (often given as a
    `~astropy.modeling.polynomial.PolynomialModel`, but not by
    requirement).

    A custom inverse can be deleted with ``del model.inverse``. In this
    case the model's inverse is reset to its default, if a default exists
    (otherwise the default is to raise `NotImplementedError`).

    Note to authors of `~astropy.modeling.Model` subclasses: To define an
    inverse for a model simply override this property to return the
    appropriate model representing the inverse. The machinery that will
    make the inverse manually-overridable is added automatically by the
    base class.
    """
    # A user-assigned inverse always takes precedence over the default.
    if self._user_inverse is not None:
        return self._user_inverse
    elif self._inverse is not None:
        result = self._inverse()
        if result is not NotImplemented:
            # NOTE(review): the bounding box of the default inverse is
            # disabled unless ``_has_inverse_bounding_box`` is set.
            if not self._has_inverse_bounding_box:
                result.bounding_box = None
            return result

    raise NotImplementedError(
        "No analytical or user-supplied inverse transform "
        "has been implemented for this model."
    )
@inverse.setter
def inverse(self, value):
    """
    Assign a custom inverse model; ``None`` explicitly disables the
    inverse (including any default one).
    """
    if not isinstance(value, (Model, type(None))):
        # Fixed: the previous message had an unbalanced parenthesis.
        raise ValueError(
            "The inverse attribute may be assigned a Model instance "
            "or None (where None explicitly forces the model to have "
            "no inverse)."
        )

    self._user_inverse = value
@inverse.deleter
def inverse(self):
    """
    Resets the model's inverse to its default (if one exists, otherwise
    the model will have no inverse).
    """
    # Silently ignore the case where no user inverse was ever assigned.
    try:
        del self._user_inverse
    except AttributeError:
        pass
@property
def has_user_inverse(self):
    """
    `True` when a custom inverse model has been assigned to this model
    by a user via assignment to ``model.inverse``.
    """
    user_inverse = self._user_inverse
    return user_inverse is not None
@property
def bounding_box(self):
    r"""
    A `tuple` of length `n_inputs` defining the bounding box limits, or
    raise `NotImplementedError` for no bounding_box.

    The default limits are given by a ``bounding_box`` property or method
    defined in the class body of a specific model. If not defined then
    this property just raises `NotImplementedError` by default (but may be
    assigned a custom value by a user). ``bounding_box`` can be set
    manually to an array-like object of shape ``(model.n_inputs, 2)``. For
    further usage, see :ref:`astropy:bounding-boxes`

    The limits are ordered according to the `numpy` ``'C'`` indexing
    convention, and are the reverse of the model input order,
    e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:

    * for 1D: ``(x_low, x_high)``
    * for 2D: ``((y_low, y_high), (x_low, x_high))``
    * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``

    Examples
    --------
    Setting the ``bounding_box`` limits for a 1D and 2D model:

    >>> from astropy.modeling.models import Gaussian1D, Gaussian2D
    >>> model_1d = Gaussian1D()
    >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
    >>> model_1d.bounding_box = (-5, 5)
    >>> model_2d.bounding_box = ((-6, 6), (-5, 5))

    Setting the bounding_box limits for a user-defined 3D
    `~astropy.modeling.custom_model`:

    >>> from astropy.modeling.models import custom_model
    >>> def const3d(x, y, z, amp=1):
    ...    return amp
    ...
    >>> Const3D = custom_model(const3d)
    >>> model_3d = Const3D()
    >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))

    To reset ``bounding_box`` to its default limits just delete the
    user-defined value--this will reset it back to the default defined
    on the class:

    >>> del model_1d.bounding_box

    To disable the bounding box entirely (including the default),
    set ``bounding_box`` to `None`:

    >>> model_1d.bounding_box = None
    >>> model_1d.bounding_box  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    NotImplementedError: No bounding box is defined for this model
    (note: the bounding box was explicitly disabled for this model;
    use `del model.bounding_box` to restore the default bounding box,
    if one is defined for this model).
    """
    # User-assigned value wins; ``NotImplemented`` marks an explicitly
    # disabled bounding box (set via ``bounding_box = None``).
    if self._user_bounding_box is not None:
        if self._user_bounding_box is NotImplemented:
            raise NotImplementedError(
                "No bounding box is defined for this model (note: the "
                "bounding box was explicitly disabled for this model; "
                "use `del model.bounding_box` to restore the default "
                "bounding box, if one is defined for this model)."
            )
        return self._user_bounding_box
    elif self._bounding_box is None:
        raise NotImplementedError("No bounding box is defined for this model.")
    elif isinstance(self._bounding_box, ModelBoundingBox):
        # This typically implies a hard-coded bounding box.  This will
        # probably be rare, but it is an option
        return self._bounding_box
    elif inspect.ismethod(self._bounding_box):
        return ModelBoundingBox.validate(self, self._bounding_box())
    else:
        # The only other allowed possibility is that it's a ModelBoundingBox
        # subclass, so we call it with its default arguments and return an
        # instance of it (that can be called to recompute the bounding box
        # with any optional parameters)
        # (In other words, in this case self._bounding_box is a *class*)
        bounding_box = self._bounding_box((), model=self)()
        return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
    """
    Assigns the bounding box limits.

    ``None`` explicitly disables the bounding box; a dict or
    `CompoundBoundingBox` assigns a compound bounding box; anything else
    is validated as a plain `ModelBoundingBox` (or the model's declared
    subclass of it).
    """
    if bounding_box is None:
        cls = None
        # We use this to explicitly set an unimplemented bounding box (as
        # opposed to no user bounding box defined)
        bounding_box = NotImplemented
    elif isinstance(bounding_box, (CompoundBoundingBox, dict)):
        cls = CompoundBoundingBox
    elif isinstance(self._bounding_box, type) and issubclass(
        self._bounding_box, ModelBoundingBox
    ):
        # The model declares its own bounding-box class; validate with it.
        cls = self._bounding_box
    else:
        cls = ModelBoundingBox

    if cls is not None:
        try:
            bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
        except ValueError as exc:
            # NOTE(review): re-raised without chaining to present a clean
            # message to the user.
            raise ValueError(exc.args[0])

    self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
    """Set the slice arguments on this model's compound bounding box."""
    bbox = self._user_bounding_box
    if not isinstance(bbox, CompoundBoundingBox):
        raise RuntimeError("The bounding_box for this model is not compound")
    bbox.slice_args = args
@bounding_box.deleter
def bounding_box(self):
    # Drop any user-assigned bounding box, restoring the class default.
    self._user_bounding_box = None
@property
def has_user_bounding_box(self):
    """
    `True` when a custom bounding box has been assigned to this model
    by a user via assignment to ``model.bounding_box``.
    """
    user_bbox = self._user_bounding_box
    return user_bbox is not None
@property
def cov_matrix(self):
    """Covariance matrix set by the fitter, if available."""
    return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
    """
    Store the covariance matrix and propagate parameter standard
    deviations to the free (neither fixed nor tied) parameters.

    Parameters
    ----------
    cov
        A single covariance object with a ``cov_matrix`` attribute, or a
        list of such objects for a model set.
    """
    self._cov_matrix = cov

    # Only parameters that were actually free during the fit have
    # entries in the covariance matrix.
    unfix_untied_params = [
        p
        for p in self.param_names
        if (self.fixed[p] is False) and (self.tied[p] is False)
    ]

    def _stds_from_diag(diag):
        # Non-positive variances (ill-conditioned fits) are reported as None.
        return [np.sqrt(x) if x > 0 else None for x in diag]

    # Fixed: use isinstance instead of ``type(cov) == list`` (E721).
    if isinstance(cov, list):  # model set
        param_stds = [_stds_from_diag(np.diag(c.cov_matrix)) for c in cov]
        for p, param_name in enumerate(unfix_untied_params):
            par = getattr(self, param_name)
            par.std = [item[p] for item in param_stds]
            setattr(self, param_name, par)
    else:
        param_stds = _stds_from_diag(np.diag(cov.cov_matrix))
        for param_name in unfix_untied_params:
            par = getattr(self, param_name)
            par.std = param_stds.pop(0)
            setattr(self, param_name, par)
@property
def stds(self):
    """Standard deviations of the parameters, if a covariance matrix is available."""
    return self._stds
@stds.setter
def stds(self, stds):
    # Stored directly; presumably populated by fitters alongside
    # ``cov_matrix`` — see the ``cov_matrix`` setter.
    self._stds = stds
@property
def separable(self):
    """A flag indicating whether a model is separable."""
    separable_flag = self._separable
    if separable_flag is None:
        # No subclass-provided value: this model does not define separability.
        raise NotImplementedError(
            'The "separable" property is not defined for '
            f"model {self.__class__.__name__}"
        )
    return separable_flag
# *** Public methods ***
def without_units_for_data(self, **kwargs):
    """
    Return an instance of the model for which the parameter values have
    been converted to the right units for the data, then the units have
    been stripped away.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    The units that the parameters should be converted to are not
    necessarily the units of the input data, but are derived from them.
    Model subclasses that want fitting to work in the presence of
    quantities need to define a ``_parameter_units_for_data_units`` method
    that takes the input and output units (as two dictionaries) and
    returns a dictionary giving the target units for each parameter.
    """
    model = self.copy()

    # Unit-less data defaults to dimensionless_unscaled; None entries are
    # skipped entirely.
    inputs_unit = {
        inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
        for inp in self.inputs
        if kwargs[inp] is not None
    }

    outputs_unit = {
        out: getattr(kwargs[out], "unit", dimensionless_unscaled)
        for out in self.outputs
        if kwargs[out] is not None
    }

    parameter_units = self._parameter_units_for_data_units(
        inputs_unit, outputs_unit
    )

    # Convert each parameter to its target unit, then drop the unit.
    for name, unit in parameter_units.items():
        parameter = getattr(model, name)
        if parameter.unit is not None:
            parameter.value = parameter.quantity.to(unit).value
            parameter._set_unit(None, force=True)

    if isinstance(model, CompoundModel):
        model.strip_units_from_tree()

    return model
def output_units(self, **kwargs):
    """
    Return a dictionary of output units for this model given a dictionary
    of fitting inputs and outputs.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    This method will force extra model evaluations, which maybe computationally
    expensive. To avoid this, one can add a return_units property to the model,
    see :ref:`astropy:models_return_units`.
    """
    units = self.return_units

    if units is None or units == {}:
        # No declared return units: evaluate the model once and read the
        # units off the result(s).
        inputs = {inp: kwargs[inp] for inp in self.inputs}

        values = self(**inputs)
        if self.n_outputs == 1:
            values = (values,)

        units = {
            out: getattr(values[index], "unit", dimensionless_unscaled)
            for index, out in enumerate(self.outputs)
        }

    return units
def strip_units_from_tree(self):
    """Remove the units from every parameter of every leaf model."""
    for leaf in self._leaflist:
        for param_name in leaf.param_names:
            getattr(leaf, param_name)._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
    """
    Return an instance of the model which has units for which the parameter
    values are compatible with the data units specified.

    The input and output Quantity objects should be given as keyword
    arguments.

    Notes
    -----
    This method is needed in order to be able to fit models with units in
    the parameters, since we need to temporarily strip away the units from
    the model during the fitting (which might be done by e.g. scipy
    functions).

    The units that the parameters will gain are not necessarily the units
    of the input data, but are derived from them. Model subclasses that
    want fitting to work in the presence of quantities need to define a
    ``_parameter_units_for_data_units`` method that takes the input and output
    units (as two dictionaries) and returns a dictionary giving the target
    units for each parameter.
    """
    model = self.copy()

    # Unit-less data defaults to dimensionless_unscaled; None entries are
    # skipped entirely.
    inputs_unit = {
        inp: getattr(kwargs[inp], "unit", dimensionless_unscaled)
        for inp in self.inputs
        if kwargs[inp] is not None
    }

    outputs_unit = {
        out: getattr(kwargs[out], "unit", dimensionless_unscaled)
        for out in self.outputs
        if kwargs[out] is not None
    }

    parameter_units = self._parameter_units_for_data_units(
        inputs_unit, outputs_unit
    )

    # We are adding units to parameters that already have a value, but we
    # don't want to convert the parameter, just add the unit directly,
    # hence the call to ``_set_unit``.
    for name, unit in parameter_units.items():
        parameter = getattr(model, name)
        parameter._set_unit(unit, force=True)

    return model
@property
def _has_units(self):
    # True when at least one parameter carries a unit.
    for param in self.param_names:
        if getattr(self, param).unit is not None:
            return True
    return False
@property
def _supports_unit_fitting(self):
    # A ``_parameter_units_for_data_units`` method signals that we have
    # enough information to strip units away for fitting and restore
    # them afterwards when fitting quantities.
    return hasattr(self, "_parameter_units_for_data_units")
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
    """Evaluate the model on some input variables.

    Abstract: every concrete model subclass must implement this.
    """
def sum_of_implicit_terms(self, *args, **kwargs):
    """
    Evaluate the sum of any implicit model terms on some input variables.

    This includes any fixed terms used in evaluating a linear model that
    do not have corresponding parameters exposed to the user. The
    prototypical case is `astropy.modeling.functional_models.Shift`, which
    corresponds to a function y = a + bx, where b=1 is intrinsically fixed
    by the type of model, such that sum_of_implicit_terms(x) == x. This
    method is needed by linear fitters to correct the dependent variable
    for the implicit term(s) when solving for the remaining terms
    (ie. a = y - bx).
    """
    # Base implementation: no implicit terms (implicitly returns None);
    # subclasses with implicit terms override this.
def render(self, out=None, coords=None):
    """
    Evaluate a model at fixed positions, respecting the ``bounding_box``.

    The key difference relative to evaluating the model directly is that
    this method is limited to a bounding box if the
    `~astropy.modeling.Model.bounding_box` attribute is set.

    Parameters
    ----------
    out : `numpy.ndarray`, optional
        An array that the evaluated model will be added to.  If this is not
        given (or given as ``None``), a new array will be created.
    coords : array-like, optional
        An array to be used to translate from the model's input coordinates
        to the ``out`` array. It should have the property that
        ``self(coords)`` yields the same shape as ``out``.  If ``out`` is
        not specified, ``coords`` will be used to determine the shape of
        the returned array. If this is not provided (or None), the model
        will be evaluated on a grid determined by
        `~astropy.modeling.Model.bounding_box`.

    Returns
    -------
    out : `numpy.ndarray`
        The model added to ``out`` if  ``out`` is not ``None``,
        or else a new array from evaluating the model
        over ``coords``. If ``out`` and ``coords`` are
        both `None`, the returned array is limited to the
        `~astropy.modeling.Model.bounding_box` limits. If
        `~astropy.modeling.Model.bounding_box` is `None`, ``arr`` or
        ``coords`` must be passed.

    Raises
    ------
    ValueError
        If ``coords`` are not given and the
        `~astropy.modeling.Model.bounding_box` of this model is not
        set.

    Examples
    --------
    :ref:`astropy:bounding-boxes`
    """
    try:
        bbox = self.bounding_box
    except NotImplementedError:
        bbox = None

    if isinstance(bbox, ModelBoundingBox):
        bbox = bbox.bounding_box()

    ndim = self.n_inputs

    if (coords is None) and (out is None) and (bbox is None):
        raise ValueError("If no bounding_box is set, coords or out must be input.")

    # for consistent indexing
    if ndim == 1:
        if coords is not None:
            coords = [coords]
        if bbox is not None:
            bbox = [bbox]

    if coords is not None:
        coords = np.asanyarray(coords, dtype=float)
        # Check dimensions match out and model
        assert len(coords) == ndim
        if out is not None:
            if coords[0].shape != out.shape:
                raise ValueError("inconsistent shape of the output.")
        else:
            out = np.zeros(coords[0].shape)

    if out is not None:
        out = np.asanyarray(out)
        if out.ndim != ndim:
            raise ValueError(
                "the array and model must have the same number of dimensions."
            )

    if bbox is not None:
        # Assures position is at center pixel,
        # important when using add_array.
        pd = (
            np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox])
            .astype(int)
            .T
        )
        pos, delta = pd

        if coords is not None:
            # Cut out only the sub-region of coords covered by the box.
            sub_shape = tuple(delta * 2 + 1)
            sub_coords = np.array(
                [extract_array(c, sub_shape, pos) for c in coords]
            )
        else:
            limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
            sub_coords = np.mgrid[limits]

        # Model inputs are ordered (x, y, ...) while bbox/grid axes follow
        # C (array) ordering, so reverse before calling the model.
        sub_coords = sub_coords[::-1]

        if out is None:
            out = self(*sub_coords)
        else:
            try:
                out = add_array(out, self(*sub_coords), pos)
            except ValueError:
                raise ValueError(
                    "The `bounding_box` is larger than the input out in "
                    "one or more dimensions. Set "
                    "`model.bounding_box = None`."
                )
    else:
        if coords is None:
            im_shape = out.shape
            limits = [slice(i) for i in im_shape]
            coords = np.mgrid[limits]

        coords = coords[::-1]

        # Add the model evaluated over the full grid into the output.
        out += self(*coords)

    return out
@property
def input_units(self):
    """
    Indicate what units (or sets of units) the evaluate method expects,
    as a dictionary mapping input names to units, or `None` if any units
    are accepted.

    Model sub-classes may instead annotate the arguments of ``evaluate``,
    in which case this property derives the input units from those
    annotations and should not be overridden.
    """
    if hasattr(self, "_input_units"):
        return self._input_units
    if hasattr(self.evaluate, "__annotations__"):
        annotations = self.evaluate.__annotations__.copy()
        annotations.pop("return", None)
        if annotations:
            # If there are not annotations for all inputs this will error.
            return {name: annotations[name] for name in self.inputs}
    # None means any unit is accepted.
    return None
@property
def return_units(self):
    """
    Indicate what units (or sets of units) the output of evaluate should
    be in, as a dictionary mapping outputs to units, or `None` if any
    units are accepted.

    Model sub-classes may instead annotate the return of ``evaluate``,
    in which case this property derives the return units from that
    annotation and should not be overridden.
    """
    if hasattr(self, "_return_units"):
        return self._return_units
    annotations = getattr(self.evaluate, "__annotations__", None)
    if annotations is not None:
        return annotations.get("return", None)
    # None means any unit is accepted.
    return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
    """
    Validate that each input can broadcast against every parameter for a
    single (non-set) model; returns the (possibly reshaped) inputs and
    the per-input broadcast shapes.
    """
    broadcasts = []
    for idx, _input in enumerate(inputs):
        input_shape = _input.shape

        # Ensure that array scalars are always upgrade to 1-D arrays for the
        # sake of consistency with how parameters work.  They will be cast back
        # to scalars at the end
        if not input_shape:
            inputs[idx] = _input.reshape((1,))

        if not params:
            max_broadcast = input_shape
        else:
            max_broadcast = ()

        for param in params:
            try:
                # bypass the broadcast_shapes call for performance reasons
                # if parameter is a scalar
                if self.standard_broadcasting and param.shape:
                    broadcast = np.broadcast_shapes(input_shape, param.shape)
                else:
                    broadcast = input_shape
            except ValueError as exc:
                exc.add_note(
                    f"self input argument {self.inputs[idx]!r} of shape"
                    f" {input_shape!r} cannot be broadcast with parameter"
                    f" {param.name!r} of shape {param.shape!r}.",
                )
                raise exc

            # Track the widest broadcast shape seen across all parameters.
            if len(broadcast) > len(max_broadcast):
                max_broadcast = broadcast
            elif len(broadcast) == len(max_broadcast):
                max_broadcast = max(max_broadcast, broadcast)

        broadcasts.append(max_broadcast)

    if self.n_outputs > self.n_inputs:
        extra_outputs = self.n_outputs - self.n_inputs
        if not broadcasts:
            # If there were no inputs then the broadcasts list is empty
            # just add a None since there is no broadcasting of outputs and
            # inputs necessary (see _prepare_outputs_single_self)
            broadcasts.append(None)
        broadcasts.extend([broadcasts[0]] * extra_outputs)

    return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
    """
    Given a shape tuple as the first input, construct a new one by removing
    that particular axis from the shape and all preceding axes. Negative axis
    numbers are permitted, where the axis is relative to the last axis.
    """
    if len(shape) == 0:
        return shape

    if axis < 0:
        # A negative axis counts from the end; only that single axis is
        # removed (preceding axes are kept).
        axis = len(shape) + axis
        return shape[:axis] + shape[axis + 1 :]

    if axis >= len(shape):
        # Clamp out-of-range axis indices to the last axis.
        axis = len(shape) - 1
    # Non-negative axis: drop the axis and everything before it.
    shape = shape[axis + 1 :]
    return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs):
    """
    Validate and reshape inputs for a model *set* so that each input can
    broadcast against the parameters excluding the model-set axis;
    returns the reshaped inputs and the per-input pivot axes.
    """
    reshaped = []
    pivots = []

    model_set_axis_param = self.model_set_axis  # needed to reshape param
    for idx, _input in enumerate(inputs):
        max_param_shape = ()
        if self._n_models > 1 and model_set_axis_input is not False:
            # Use the shape of the input *excluding* the model axis
            input_shape = (
                _input.shape[:model_set_axis_input]
                + _input.shape[model_set_axis_input + 1 :]
            )
        else:
            input_shape = _input.shape

        for param in params:
            try:
                np.broadcast_shapes(
                    input_shape,
                    self._remove_axes_from_shape(param.shape, model_set_axis_param),
                )
            except ValueError as exc:
                exc.add_note(
                    f"Model input argument {self.inputs[idx]!r} of shape"
                    f" {input_shape!r} "
                    f"cannot be broadcast with parameter {param.name!r} of shape "
                    f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}.",
                )
                raise exc

            if len(param.shape) - 1 > len(max_param_shape):
                max_param_shape = self._remove_axes_from_shape(
                    param.shape, model_set_axis_param
                )

        # We've now determined that, excluding the model_set_axis, the
        # input can broadcast with all the parameters
        input_ndim = len(input_shape)
        if model_set_axis_input is False:
            if len(max_param_shape) > input_ndim:
                # Just needs to prepend new axes to the input
                n_new_axes = 1 + len(max_param_shape) - input_ndim
                new_axes = (1,) * n_new_axes
                new_shape = new_axes + _input.shape
                pivot = model_set_axis_param
            else:
                # Insert a length-1 model axis at the pivot position.
                pivot = input_ndim - len(max_param_shape)
                new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:]
            new_input = _input.reshape(new_shape)
        else:
            if len(max_param_shape) >= input_ndim:
                n_new_axes = len(max_param_shape) - input_ndim
                pivot = self.model_set_axis
                new_axes = (1,) * n_new_axes
                new_shape = (
                    _input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :]
                )
                new_input = _input.reshape(new_shape)
            else:
                # Move the model-set axis of the input to the pivot position.
                pivot = _input.ndim - len(max_param_shape) - 1
                new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1)

        pivots.append(pivot)
        reshaped.append(new_input)

    if self.n_inputs < self.n_outputs:
        pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))

    return reshaped, (pivots,)
def prepare_inputs(
    self, *inputs, model_set_axis=None, equivalencies=None, **kwargs
):
    """
    This method is used in `~astropy.modeling.Model.__call__` to ensure
    that all the inputs to the model can be broadcast into compatible
    shapes (if one or both of them are input as arrays), particularly if
    there are more than one parameter sets. This also makes sure that (if
    applicable) the units of the input will be compatible with the evaluate
    method.
    """
    # When we instantiate the model class, we make sure that __call__ can
    # take the following two keyword arguments: model_set_axis and
    # equivalencies.
    if model_set_axis is None:
        # By default the model_set_axis for the input is assumed to be the
        # same as that for the parameters the model was defined with
        # TODO: Ensure that negative model_set_axis arguments are respected
        model_set_axis = self.model_set_axis

    params = [getattr(self, name) for name in self.param_names]
    # Coerce all inputs to float arrays before shape/unit validation.
    inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]

    self._validate_input_shapes(inputs, self.inputs, model_set_axis)

    inputs_map = kwargs.get("inputs_map")

    inputs = self._validate_input_units(inputs, equivalencies, inputs_map)

    # The input formatting required for single models versus a multiple
    # model set are different enough that they've been split into separate
    # subroutines
    if self._n_models == 1:
        return self._prepare_inputs_single_model(params, inputs, **kwargs)
    else:
        return self._prepare_inputs_model_set(
            params, inputs, model_set_axis, **kwargs
        )
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
    """Check, and where needed convert, the units of *inputs*.

    Parameters
    ----------
    inputs : list
        Evaluation inputs; may be plain arrays or
        `~astropy.units.Quantity` objects.
    equivalencies : dict, optional
        Per-input unit equivalencies supplied at call time.
    inputs_map : list of tuple, optional
        ``(model, (alias, original_name))`` pairs; used inside compound
        models to alias input names for the equivalency lookup.

    Returns
    -------
    list
        The inputs, converted to the model's input units where required.

    Raises
    ------
    `~astropy.units.UnitsError`
        If an input cannot be converted to the required units.
    """
    inputs = list(inputs)
    name = self.name or self.__class__.__name__
    # Check that the units are correct, if applicable
    if self.input_units is not None:
        # If a leaflist is provided that means this is in the context of
        # a compound model and it is necessary to create the appropriate
        # alias for the input coordinate name for the equivalencies dict
        if inputs_map:
            edict = {}
            for mod, mapping in inputs_map:
                if self is mod:
                    edict[mapping[0]] = equivalencies[mapping[1]]
        else:
            edict = equivalencies
        # We combine any instance-level input equivalencies with user
        # specified ones at call-time.
        input_units_equivalencies = _combine_equivalency_dict(
            self.inputs, edict, self.input_units_equivalencies
        )
        # We now iterate over the different inputs and make sure that their
        # units are consistent with those specified in input_units.
        for i in range(len(inputs)):
            input_name = self.inputs[i]
            input_unit = self.input_units.get(input_name, None)
            if input_unit is None:
                # No unit requirement declared for this input.
                continue
            if isinstance(inputs[i], Quantity):
                # We check for consistency of the units with input_units,
                # taking into account any equivalencies
                if inputs[i].unit.is_equivalent(
                    input_unit, equivalencies=input_units_equivalencies[input_name]
                ):
                    # If equivalencies have been specified, we need to
                    # convert the input to the input units - this is
                    # because some equivalencies are non-linear, and
                    # we need to be sure that we evaluate the model in
                    # its own frame of reference. If input_units_strict
                    # is set, we also need to convert to the input units.
                    if (
                        len(input_units_equivalencies) > 0
                        or self.input_units_strict[input_name]
                    ):
                        inputs[i] = inputs[i].to(
                            input_unit,
                            equivalencies=input_units_equivalencies[input_name],
                        )
                else:
                    # We consider the following two cases separately so as
                    # to be able to raise more appropriate/nicer exceptions
                    if input_unit is dimensionless_unscaled:
                        raise UnitsError(
                            f"{name}: Units of input '{self.inputs[i]}', "
                            f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
                            "could not be converted to "
                            "required dimensionless "
                            "input"
                        )
                    else:
                        raise UnitsError(
                            f"{name}: Units of input '{self.inputs[i]}', "
                            f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
                            " could not be "
                            "converted to required input"
                            f" units of {input_unit} ({input_unit.physical_type})"
                        )
            else:
                # If we allow dimensionless input, we add the units to the
                # input values without conversion, otherwise we raise an
                # exception.
                if (
                    not self.input_units_allow_dimensionless[input_name]
                    and input_unit is not dimensionless_unscaled
                    and input_unit is not None
                ):
                    # An all-zero input is tolerated: zero is zero in any
                    # (linearly) convertible unit.
                    if np.any(inputs[i] != 0):
                        raise UnitsError(
                            f"{name}: Units of input '{self.inputs[i]}',"
                            " (dimensionless), could not be converted to required "
                            f"input units of {input_unit} "
                            f"({input_unit.physical_type})"
                        )
    return inputs
def _process_output_units(self, inputs, outputs):
    """Attach ``return_units`` to *outputs* when any input carried units."""
    any_quantity_input = any(isinstance(value, Quantity) for value in inputs)
    if self.return_units and any_quantity_input:
        # A single bare unit (one output, non-iterable return_units) is
        # normalized into a {output_name: unit} mapping first.
        if self.n_outputs == 1 and not np.iterable(self.return_units):
            unit_map = {self.outputs[0]: self.return_units}
        else:
            unit_map = self.return_units
        outputs = tuple(
            Quantity(result, unit_map.get(out_name, None), subok=True)
            for result, out_name in zip(outputs, self.outputs)
        )
    return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
shapes = broadcasted_shapes[0]
for idx, output in enumerate(outputs):
if None in shapes:
# Previously, we used our own function (check_broadcast) instead
# of np.broadcast_shapes in the following try block
# - check_broadcast raised an exception when passed a None.
# - as of numpy 1.26, np.broadcast raises a deprecation warning
# when passed a `None` value, but returns an empty tuple.
#
# Since () and None have different effects downstream of this function,
# and to preserve backward-compatibility, we handle this special here
broadcast_shape = shapes[idx]
else:
try:
broadcast_shape = np.broadcast_shapes(*shapes)
except Exception:
broadcast_shape = shapes[idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
# If model_set_axis = False was passed then use
# self._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot, model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
    """Dispatch output post-processing for a single model or a model set."""
    if len(self) == 1:
        return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
    return self._prepare_outputs_model_set(
        outputs, broadcasted_shapes, kwargs.get("model_set_axis")
    )
def copy(self):
    """Return a copy of this model.

    The copy is deep, so all model attributes — including parameter
    values — are duplicated rather than shared with the original.
    """
    duplicate = copy.deepcopy(self)
    return duplicate
def deepcopy(self):
    """Return a deep copy of this model (delegates to `copy`)."""
    return self.copy()
@sharedmethod
def rename(self, name):
    """Return a copy of this model carrying the new *name*."""
    renamed = self.copy()
    renamed._name = name
    return renamed
def coerce_units(
    self,
    input_units=None,
    return_units=None,
    input_units_equivalencies=None,
    input_units_allow_dimensionless=False,
):
    """
    Attach units to this (unitless) model.

    Parameters
    ----------
    input_units : dict or tuple, optional
        Input units to attach. If dict, each key is the name of a model input,
        and the value is the unit to attach. If tuple, the elements are units
        to attach in order corresponding to `~astropy.modeling.Model.inputs`.
    return_units : dict or tuple, optional
        Output units to attach. If dict, each key is the name of a model output,
        and the value is the unit to attach. If tuple, the elements are units
        to attach in order corresponding to `~astropy.modeling.Model.outputs`.
    input_units_equivalencies : dict, optional
        Default equivalencies to apply to input values. If set, this should be a
        dictionary where each key is a string that corresponds to one of the
        model inputs.
    input_units_allow_dimensionless : bool or dict, optional
        Allow dimensionless input. If this is True, input values to evaluate will
        gain the units specified in input_units. If this is a dictionary then it
        should map input name to a bool to allow dimensionless numbers for that
        input.

    Returns
    -------
    `~astropy.modeling.CompoundModel`
        A `~astropy.modeling.CompoundModel` composed of the current
        model plus `~astropy.modeling.mappings.UnitsMapping`
        model(s) that attach the units.

    Raises
    ------
    ValueError
        If the current model already has units.

    Examples
    --------
    Wrapping a unitless model to require and convert units:

    >>> from astropy.modeling.models import Polynomial1D
    >>> from astropy import units as u
    >>> poly = Polynomial1D(1, c0=1, c1=2)
    >>> model = poly.coerce_units((u.m,), (u.s,))
    >>> model(u.Quantity(10, u.m))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(u.Quantity(1000, u.cm))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(u.Quantity(10, u.cm))  # doctest: +FLOAT_CMP
    <Quantity 1.2 s>

    Wrapping a unitless model but still permitting unitless input:

    >>> from astropy.modeling.models import Polynomial1D
    >>> from astropy import units as u
    >>> poly = Polynomial1D(1, c0=1, c1=2)
    >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
    >>> model(u.Quantity(10, u.m))  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    >>> model(10)  # doctest: +FLOAT_CMP
    <Quantity 21. s>
    """
    from .mappings import UnitsMapping

    result = self

    if input_units is not None:
        # Refuse to layer new input units over a model that already
        # declares non-dimensionless input units.
        if self.input_units is not None:
            model_units = self.input_units
        else:
            model_units = {}

        for unit in [model_units.get(i) for i in self.inputs]:
            if unit is not None and unit != dimensionless_unscaled:
                raise ValueError(
                    "Cannot specify input_units for model with existing input units"
                )

        if isinstance(input_units, dict):
            if input_units.keys() != set(self.inputs):
                message = (
                    f"""input_units keys ({", ".join(input_units.keys())}) """
                    f"""do not match model inputs ({", ".join(self.inputs)})"""
                )
                raise ValueError(message)
            # Normalize the dict form to a tuple ordered like self.inputs.
            input_units = [input_units[i] for i in self.inputs]

        if len(input_units) != self.n_inputs:
            message = (
                "input_units length does not match n_inputs: "
                f"expected {self.n_inputs}, received {len(input_units)}"
            )
            raise ValueError(message)

        mapping = tuple(
            (unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units)
        )
        input_mapping = UnitsMapping(
            mapping,
            input_units_equivalencies=input_units_equivalencies,
            input_units_allow_dimensionless=input_units_allow_dimensionless,
        )
        input_mapping.inputs = self.inputs
        input_mapping.outputs = self.inputs
        # Prepend the conversion stage so raw inputs are converted first.
        result = input_mapping | result

    if return_units is not None:
        if self.return_units is not None:
            model_units = self.return_units
        else:
            model_units = {}

        for unit in [model_units.get(i) for i in self.outputs]:
            if unit is not None and unit != dimensionless_unscaled:
                raise ValueError(
                    "Cannot specify return_units for model "
                    "with existing output units"
                )

        if isinstance(return_units, dict):
            if return_units.keys() != set(self.outputs):
                message = (
                    f"""return_units keys ({", ".join(return_units.keys())}) """
                    f"""do not match model outputs ({", ".join(self.outputs)})"""
                )
                raise ValueError(message)
            # Normalize the dict form to a tuple ordered like self.outputs.
            return_units = [return_units[i] for i in self.outputs]

        if len(return_units) != self.n_outputs:
            message = (
                "return_units length does not match n_outputs: "
                f"expected {self.n_outputs}, received {len(return_units)}"
            )
            raise ValueError(message)

        mapping = tuple(
            (model_units.get(i), unit)
            for i, unit in zip(self.outputs, return_units)
        )
        return_mapping = UnitsMapping(mapping)
        return_mapping.inputs = self.outputs
        return_mapping.outputs = self.outputs
        # Append the stage that attaches units to the outputs.
        result = result | return_mapping

    return result
@property
def n_submodels(self):
    """Number of components in this model; a non-compound model is always 1."""
    return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed
to `~astropy.modeling.Model.__init__` and store them in private
instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _reset_parameters(self, *args, **kwargs):
    """
    Reset parameters on the models to those specified.

    Parameters can be specified either as positional arguments or keyword
    arguments, as in the model initializer.  Any parameters not specified
    will be reset to their default values.
    """
    # Re-run the full parameter setup, then rebuild the slice metadata
    # so the flat _parameters array matches the new values.
    self._initialize_parameters(args, kwargs)
    self._initialize_slices()
def _initialize_parameters(self, args, kwargs):
    """
    Initialize the _parameters array that stores raw parameter values for
    all parameter sets for use with vectorized fitting algorithms; on
    FittableModels the _param_name attributes actually just reference
    slices of this array.

    Parameters
    ----------
    args : tuple
        Positional parameter values, matched against ``self.param_names``
        in order.
    kwargs : dict
        Keyword parameter values plus the special ``n_models`` and
        ``model_set_axis`` options; recognized keys are popped.

    Raises
    ------
    TypeError
        On too many positional values, duplicate values for one
        parameter, or unrecognized keyword arguments.
    ValueError
        On an invalid ``n_models`` or ``model_set_axis``.
    InputParameterError
        On inconsistent parameter dimensions for a model set.
    """
    n_models = kwargs.pop("n_models", None)

    if not (
        n_models is None
        or (isinstance(n_models, (int, np.integer)) and n_models >= 1)
    ):
        raise ValueError(
            "n_models must be either None (in which case it is "
            "determined from the model_set_axis of the parameter initial "
            "values) or it must be a positive integer "
            f"(got {n_models!r})"
        )

    model_set_axis = kwargs.pop("model_set_axis", None)
    if model_set_axis is None:
        if n_models is not None and n_models > 1:
            # Default to zero
            model_set_axis = 0
        else:
            # Otherwise disable
            model_set_axis = False
    else:
        if not (
            model_set_axis is False
            or np.issubdtype(type(model_set_axis), np.integer)
        ):
            raise ValueError(
                "model_set_axis must be either False or an integer "
                "specifying the parameter array axis to map to each "
                f"model in a set of models (got {model_set_axis!r})."
            )

    # Process positional arguments by matching them up with the
    # corresponding parameters in self.param_names--if any also appear as
    # keyword arguments this presents a conflict
    params = set()
    if len(args) > len(self.param_names):
        raise TypeError(
            f"{self.__class__.__name__}.__init__() takes at most "
            f"{len(self.param_names)} positional arguments ({len(args)} given)"
        )

    self._model_set_axis = model_set_axis
    self._param_metrics = defaultdict(dict)

    for idx, arg in enumerate(args):
        if arg is None:
            # A value of None implies using the default value, if exists
            continue
        # We use quantity_asanyarray here instead of np.asanyarray because
        # if any of the arguments are quantities, we need to return a
        # Quantity object not a plain Numpy array.
        param_name = self.param_names[idx]
        params.add(param_name)
        if not isinstance(arg, Parameter):
            value = quantity_asanyarray(arg, dtype=float)
        else:
            value = arg
        self._initialize_parameter_value(param_name, value)

    # At this point the only remaining keyword arguments should be
    # parameter names; any others are in error.
    for param_name in self.param_names:
        if param_name in kwargs:
            if param_name in params:
                raise TypeError(
                    f"{self.__class__.__name__}.__init__() got multiple values for"
                    f" parameter {param_name!r}"
                )
            value = kwargs.pop(param_name)
            if value is None:
                continue
            # We use quantity_asanyarray here instead of np.asanyarray
            # because if any of the arguments are quantities, we need
            # to return a Quantity object not a plain Numpy array.
            value = quantity_asanyarray(value, dtype=float)
            params.add(param_name)
            self._initialize_parameter_value(param_name, value)
    # Now deal with case where param_name is not supplied by args or kwargs
    for param_name in self.param_names:
        if param_name not in params:
            self._initialize_parameter_value(param_name, None)

    if kwargs:
        # If any keyword arguments were left over at this point they are
        # invalid--the base class should only be passed the parameter
        # values, constraints, and param_dim
        for kwarg in kwargs:
            # Just raise an error on the first unrecognized argument
            raise TypeError(
                f"{self.__class__.__name__}.__init__() got an unrecognized"
                f" parameter {kwarg!r}"
            )

    # Determine the number of model sets: If the model_set_axis is
    # None then there is just one parameter set; otherwise it is determined
    # by the size of that axis on the first parameter--if the other
    # parameters don't have the right number of axes or the sizes of their
    # model_set_axis don't match an error is raised
    if model_set_axis is not False and n_models != 1 and params:
        max_ndim = 0
        if model_set_axis < 0:
            min_ndim = abs(model_set_axis)
        else:
            min_ndim = model_set_axis + 1

        for name in self.param_names:
            value = getattr(self, name)
            param_ndim = np.ndim(value)
            if param_ndim < min_ndim:
                raise InputParameterError(
                    "All parameter values must be arrays of dimension at least"
                    f" {min_ndim} for model_set_axis={model_set_axis} (the value"
                    f" given for {name!r} is only {param_ndim}-dimensional)"
                )

            max_ndim = max(max_ndim, param_ndim)

            if n_models is None:
                # Use the dimensions of the first parameter to determine
                # the number of model sets
                n_models = value.shape[model_set_axis]
            elif value.shape[model_set_axis] != n_models:
                raise InputParameterError(
                    f"Inconsistent dimensions for parameter {name!r} for"
                    f" {n_models} model sets. The length of axis"
                    f" {model_set_axis} must be the same for all input parameter"
                    " values"
                )

        self._check_param_broadcast(max_ndim)
    else:
        if n_models is None:
            n_models = 1

        self._check_param_broadcast(None)

    self._n_models = n_models
    # now validate parameters
    for name in params:
        param = getattr(self, name)
        if param._validator is not None:
            param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
    """Mostly deals with consistency checks and determining unit issues.

    Stores *value* (or the parameter's default when *value* is None) on
    the `Parameter` named *param_name*, recording its unit and — when
    the parameter has a setter — its internal (setter-transformed)
    value and unit.

    Raises
    ------
    TypeError
        If no value was given and the parameter has no default.
    InputParameterError
        If a plain number is given for a parameter that requires a
        `~astropy.units.Quantity`.
    """
    # A pre-built Parameter object is stored directly on the instance.
    if isinstance(value, Parameter):
        self.__dict__[param_name] = value
        return

    param = getattr(self, param_name)
    # Use default if value is not provided
    if value is None:
        default = param.default

        if default is None:
            # No value was supplied for the parameter and the
            # parameter does not have a default, therefore the model
            # is underspecified
            raise TypeError(
                f"{self.__class__.__name__}.__init__() requires a value for "
                f"parameter {param_name!r}"
            )

        value = default
        unit = param.unit
    else:
        if isinstance(value, Quantity):
            unit = value.unit
            value = value.value
        else:
            unit = None

        if unit is None and param.unit is not None:
            raise InputParameterError(
                f"{self.__class__.__name__}.__init__() requires a Quantity for"
                f" parameter {param_name!r}"
            )

    param._unit = unit
    param._set_unit(unit, force=True)
    param.internal_unit = None
    if param._setter is not None:
        # Pass the value through the setter; if it comes back as a
        # Quantity, track the internal unit separately.
        if unit is not None:
            _val = param._setter(value * unit)
        else:
            _val = param._setter(value)
        if isinstance(_val, Quantity):
            param.internal_unit = _val.unit
            param._internal_value = np.array(_val.value)
        else:
            param.internal_unit = None
            param._internal_value = np.array(_val)
    else:
        param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]["slice"] = param_slice
param_metrics[name]["shape"] = param_shape
param_metrics[name]["size"] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]["slice"]] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]["slice"]]
value.shape = param_metrics[name]["shape"]
param.value = value
def _check_param_broadcast(self, max_ndim):
    """
    This subroutine checks that all parameter arrays can be broadcast
    against each other, and determines the shapes parameters must have in
    order to broadcast correctly.

    If model_set_axis is None this merely checks that the parameters
    broadcast and returns an empty dict if so.  This mode is only used for
    single model sets.

    Parameters
    ----------
    max_ndim : int or None
        Largest number of dimensions among the parameter arrays of a
        model set, or None for a single model (no shape padding done).

    Raises
    ------
    InputParameterError
        If the (padded) parameter shapes are not mutually broadcastable.
    """
    all_shapes = []
    model_set_axis = self._model_set_axis

    for name in self.param_names:
        param = getattr(self, name)
        value = param.value
        param_shape = np.shape(value)
        param_ndim = len(param_shape)
        if max_ndim is not None and param_ndim < max_ndim:
            # All arrays have the same number of dimensions up to the
            # model_set_axis dimension, but after that they may have a
            # different number of trailing axes.  The number of trailing
            # axes must be extended for mutual compatibility.  For example
            # if max_ndim = 3 and model_set_axis = 0, an array with the
            # shape (2, 2) must be extended to (2, 1, 2).  However, an
            # array with shape (2,) is extended to (2, 1).
            new_axes = (1,) * (max_ndim - param_ndim)

            if model_set_axis < 0:
                # Just need to prepend axes to make up the difference
                broadcast_shape = new_axes + param_shape
            else:
                broadcast_shape = (
                    param_shape[: model_set_axis + 1]
                    + new_axes
                    + param_shape[model_set_axis + 1 :]
                )
            # Remember the padded shape so _param_sets can reshape later.
            self._param_metrics[name]["broadcast_shape"] = broadcast_shape
            all_shapes.append(broadcast_shape)
        else:
            all_shapes.append(param_shape)

    # Now check mutual broadcastability of all shapes
    try:
        np.broadcast_shapes(*all_shapes)
    except ValueError as exc:
        base_message = (
            "All parameter arrays "
            "must have shapes that are mutually compatible according "
            "to the broadcasting rules."
        )
        raise InputParameterError(f"{base_message} {repr(exc)}") from None
def _param_sets(self, raw=False, units=False):
    """
    Implementation of the Model.param_sets property.

    This internal implementation has a ``raw`` argument which controls
    whether or not to return the raw parameter values (i.e. the values that
    are actually stored in the ._parameters array, as opposed to the values
    displayed to users.  In most cases these are one in the same but there
    are currently a few exceptions.

    Note: This is notably an overcomplicated device and may be removed
    entirely in the near future.

    Parameters
    ----------
    raw : bool, optional
        When True, use the internal (setter-transformed) values/units
        for parameters that have a setter.
    units : bool, optional
        When True, wrap each value in a `~astropy.units.Quantity` with
        the parameter's unit (if any).
    """
    values = []
    shapes = []
    for name in self.param_names:
        param = getattr(self, name)

        if raw and param._setter:
            value = param._internal_value
        else:
            value = param.value

        broadcast_shape = self._param_metrics[name].get("broadcast_shape")
        if broadcast_shape is not None:
            value = value.reshape(broadcast_shape)

        shapes.append(np.shape(value))

        if len(self) == 1:
            # Add a single param set axis to the parameter's value (thus
            # converting scalars to shape (1,) array values) for
            # consistency
            value = np.array([value])

        if units:
            if raw and param.internal_unit is not None:
                unit = param.internal_unit
            else:
                unit = param.unit
            if unit is not None:
                value = Quantity(value, unit, subok=True)

        values.append(value)

    if len(set(shapes)) != 1 or units:
        # If the parameters are not all the same shape, converting to an
        # array is going to produce an object array
        # However the way Numpy creates object arrays is tricky in that it
        # will recurse into array objects in the list and break them up
        # into separate objects.  Doing things this way ensures a 1-D
        # object array the elements of which are the individual parameter
        # arrays.  There's not much reason to do this over returning a list
        # except for consistency
        psets = np.empty(len(values), dtype=object)
        psets[:] = values
        return psets

    return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names
)
if self.name is not None:
parts.append(f"name={self.name!r}")
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f"{kwarg}={value!r}")
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("Inputs", self.inputs),
("Outputs", self.outputs),
("Model set size", len(self)),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f"{keyword}: {value}")
parts.append("Parameters:")
if len(self) == 1:
columns = [[getattr(self, name).value] for name in self.param_names]
else:
columns = [getattr(self, name).value for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), 4 * " "))
return "\n".join(parts)
| Model |
python | pydata__xarray | xarray/core/types.py | {
"start": 10810,
"end": 11647
} | class ____(BaseBuffer, Protocol[AnyStr_co]):
def read(self, n: int = ..., /) -> AnyStr_co:
# for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
...
QuantileMethods = Literal[
"inverted_cdf",
"averaged_inverted_cdf",
"closest_observation",
"interpolated_inverted_cdf",
"hazen",
"weibull",
"linear",
"median_unbiased",
"normal_unbiased",
"lower",
"higher",
"midpoint",
"nearest",
]
NetcdfWriteModes = Literal["w", "a"]
ZarrWriteModes = Literal["w", "w-", "a", "a-", "r+", "r"]
GroupKey = Any
GroupIndex = Union[slice, list[int]]
GroupIndices = tuple[GroupIndex, ...]
Bins = Union[
int, Sequence[int], Sequence[float], Sequence[pd.Timestamp], np.ndarray, pd.Index
]
ResampleCompatible: TypeAlias = str | datetime.timedelta | pd.Timedelta | pd.DateOffset
| ReadBuffer |
python | getsentry__sentry | src/sentry/services/eventstore/models.py | {
"start": 21528,
"end": 25369
} | class ____(BaseEvent):
def __init__(
self,
project_id: int,
event_id: str,
group_id: int | None = None,
data: Mapping[str, Any] | None = None,
snuba_data: Mapping[str, Any] | None = None,
groups: Sequence[Group] | None = None,
):
super().__init__(project_id, event_id, snuba_data=snuba_data)
self.group_id = group_id
self.groups = groups
self.data = data
def __getstate__(self) -> Mapping[str, Any]:
state = super().__getstate__()
state.pop("_group_cache", None)
state.pop("_groups_cache", None)
return state
def __repr__(self) -> str:
return "<sentry.services.eventstore.models.Event at 0x{:x}: event_id={}>".format(
id(self), self.event_id
)
@property
def data(self) -> NodeData:
return self._data
@data.setter
def data(self, value: Mapping[str, Any]) -> None:
node_id = Event.generate_node_id(self.project_id, self.event_id)
self._data = NodeData(
node_id, data=value, wrapper=EventDict, ref_version=2, ref_func=ref_func
)
@property
def group_id(self) -> int | None:
# TODO: `group_id` and `group` are deprecated properties on `Event`. We will remove them
# going forward. Since events may now be associated with multiple `Group` models, we will
# require `GroupEvent` to be passed around. The `group_events` property should be used to
# iterate through all `Groups` associated with an `Event`
if self._group_id:
return self._group_id
column = self._get_column_name(Columns.GROUP_ID)
return self._snuba_data.get(column)
@group_id.setter
def group_id(self, value: int | None) -> None:
self._group_id = value
# TODO: We need a better way to cache these properties. functools
# doesn't quite do the trick as there is a reference bug with unsaved
# models. But the current _group_cache thing is also clunky because these
# properties need to be stripped out in __getstate__.
@property
def group(self) -> Group | None:
from sentry.models.group import Group
if not self.group_id:
return None
if not hasattr(self, "_group_cache"):
self._group_cache = Group.objects.get(id=self.group_id)
return self._group_cache
@group.setter
def group(self, group: Group) -> None:
self.group_id = group.id
self._group_cache = group
_groups_cache: Sequence[Group]
@property
def groups(self) -> Sequence[Group]:
from sentry.models.group import Group
if getattr(self, "_groups_cache"):
return self._groups_cache
if self._group_ids is not None:
group_ids = self._group_ids
else:
snuba_group_id = self.group_id
# TODO: Replace `snuba_group_id` with this once we deprecate `group_id`.
# snuba_group_id = self._snuba_data.get(self._get_column_name(Columns.GROUP_ID))
snuba_group_ids = self._snuba_data.get(self._get_column_name(Columns.GROUP_IDS))
group_ids = []
if snuba_group_id:
group_ids.append(snuba_group_id)
if snuba_group_ids:
group_ids.extend(snuba_group_ids)
if group_ids:
groups = list(Group.objects.filter(id__in=group_ids))
else:
groups = []
self._groups_cache = groups
return groups
@groups.setter
def groups(self, values: Sequence[Group] | None):
self._groups_cache = values
self._group_ids = [group.id for group in values] if values else None
def for_group(self, group: Group) -> GroupEvent:
return GroupEvent.from_event(self, group)
| Event |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 203232,
"end": 203332
} | class ____(
_DateMultiRangeTests, _MultiRangeTypeRoundTrip
):
pass
| DateMultiRangeRoundTripTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.