language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/groups-of-strings.py | {
"start": 39,
"end": 873
} | class ____(object): # Time: O(n * alpha(n)), Space: O(n)
def __init__(self, n):
self.set = range(n)
self.rank = [0]*n
self.size = [1]*n
self.total = n
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y):
x, y = self.find_set(x), self.find_set(y)
if x == y:
return False
if self.rank[x] > self.rank[y]: # union by rank
x, y = y, x
self.set[x] = self.set[y]
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
self.size[y] += self.size[x]
self.total -= 1
return True
# bitmasks, union find
| UnionFind |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 868313,
"end": 868719
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(PullRequestChangedFile, graphql_name="node")
"""The item at the end of the edge."""
| PullRequestChangedFileEdge |
python | django__django | tests/admin_filters/tests.py | {
"start": 8404,
"end": 8533
} | class ____(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
| DecadeFilterBookAdminWithQuerysetBasedLookups |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-opensearch/llama_index/vector_stores/alibabacloud_opensearch/base.py | {
"start": 2300,
"end": 5487
} | class ____:
"""
`Alibaba Cloud Opensearch` client configuration.
Attribute:
endpoint (str) : The endpoint of opensearch instance, You can find it
from the console of Alibaba Cloud OpenSearch.
instance_id (str) : The identify of opensearch instance, You can find
it from the console of Alibaba Cloud OpenSearch.
username (str) : The username specified when purchasing the instance.
password (str) : The password specified when purchasing the instance,
After the instance is created, you can modify it on the console.
tablename (str): The table name specified during instance configuration.
namespace (str) : The instance data will be partitioned based on the "namespace"
field. If the namespace is enabled, you need to specify the namespace field
name during initialization, Otherwise, the queries cannot be executed
correctly.
field_mapping (dict[str, str]): The field mapping between llamaindex meta field
and OpenSearch table filed name. OpenSearch has some rules for the field name,
when the meta field name break the rules, can map to another name.
output_fields (list[str]): Specify the field list returned when searching OpenSearch.
id_field (str): The primary key field name in OpenSearch, default is `id`.
embedding_field (list[float]): The field name which stored the embedding.
text_field: The name of the field that stores the key text.
search_config (dict, optional): The configuration used for searching the OpenSearch.
"""
def __init__(
self,
endpoint: str,
instance_id: str,
username: str,
password: str,
table_name: str,
namespace: str = "",
field_mapping: Dict[str, str] = None,
output_fields: Optional[List[str]] = None,
id_field: str = "id",
embedding_field: str = DEFAULT_EMBEDDING_KEY,
text_field: str = DEFAULT_TEXT_KEY,
search_config: dict = None,
) -> None:
self.endpoint = endpoint
self.instance_id = instance_id
self.username = username
self.password = password
self.namespace = namespace
self.table_name = table_name
self.data_source_name = f"{self.instance_id}_{self.table_name}"
self.field_mapping = field_mapping
self.id_field = id_field
self.embedding_field = embedding_field
self.text_field = text_field
self.search_config = search_config
self.output_fields = output_fields
if self.output_fields is None:
self.output_fields = (
list(self.field_mapping.values()) if self.field_mapping else []
)
if self.text_field not in self.output_fields:
self.output_fields.append(self.text_field)
self.inverse_field_mapping: Dict[str, str] = (
{value: key for key, value in self.field_mapping.items()}
if self.field_mapping
else {}
)
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
| AlibabaCloudOpenSearchConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots2.py | {
"start": 186,
"end": 423
} | class ____(NoSlots1):
__slots__ = "aaa", "bbb", "ccc"
# This should generate an error
aaa = 3
# This should generate an error
bbb: int = 3
# This should generate an error
(ccc, ddd) = 3, 4
eee = 5
| Slots1 |
python | kamyu104__LeetCode-Solutions | Python/range-sum-query-2d-immutable.py | {
"start": 68,
"end": 1021
} | class ____(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.__sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.__sums[i][j] = self.__sums[i][j-1] + self.__sums[i-1][j] - \
self.__sums[i-1][j-1] + matrix[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.__sums[row2+1][col2+1] - self.__sums[row2+1][col1] - \
self.__sums[row1][col2+1] + self.__sums[row1][col1]
| NumMatrix |
python | huggingface__transformers | src/transformers/models/moonshine/modeling_moonshine.py | {
"start": 2857,
"end": 3519
} | class ____(nn.Module):
def __init__(self, config, hidden_act):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states, gate = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation_fn(gate) * hidden_states
hidden_states = self.fc2(hidden_states)
return hidden_states
| MoonshineDecoderMLP |
python | python-pillow__Pillow | src/PIL/ImageShow.py | {
"start": 5675,
"end": 6061
} | class ____(abc.ABC, Viewer):
format = "PNG"
options = {"compress_level": 1, "save_all": True}
@abc.abstractmethod
def get_command_ex(self, file: str, **options: Any) -> tuple[str, str]:
pass
def get_command(self, file: str, **options: Any) -> str:
command = self.get_command_ex(file, **options)[0]
return f"{command} {quote(file)}"
| UnixViewer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/types.py | {
"start": 12214,
"end": 12888
} | class ____(_IntegerType, sqltypes.SMALLINT):
"""MySQL SMALLINTEGER type."""
__visit_name__ = "SMALLINT"
def __init__(self, display_width: Optional[int] = None, **kw: Any):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not effect the values
returned by the underlying database API, which continue to be
numeric.
"""
super().__init__(display_width=display_width, **kw)
| SMALLINT |
python | python__mypy | mypy/mixedtraverser.py | {
"start": 489,
"end": 3821
} | class ____(TraverserVisitor, TypeTraverserVisitor):
"""Recursive traversal of both Node and Type objects."""
def __init__(self) -> None:
self.in_type_alias_expr = False
# Symbol nodes
def visit_var(self, var: Var, /) -> None:
self.visit_optional_type(var.type)
def visit_func(self, o: FuncItem, /) -> None:
super().visit_func(o)
self.visit_optional_type(o.type)
def visit_class_def(self, o: ClassDef, /) -> None:
# TODO: Should we visit generated methods/variables as well, either here or in
# TraverserVisitor?
super().visit_class_def(o)
info = o.info
if info:
for base in info.bases:
base.accept(self)
if info.special_alias:
info.special_alias.accept(self)
def visit_type_alias_expr(self, o: TypeAliasExpr, /) -> None:
super().visit_type_alias_expr(o)
o.node.accept(self)
def visit_type_var_expr(self, o: TypeVarExpr, /) -> None:
super().visit_type_var_expr(o)
o.upper_bound.accept(self)
for value in o.values:
value.accept(self)
def visit_typeddict_expr(self, o: TypedDictExpr, /) -> None:
super().visit_typeddict_expr(o)
self.visit_optional_type(o.info.typeddict_type)
def visit_namedtuple_expr(self, o: NamedTupleExpr, /) -> None:
super().visit_namedtuple_expr(o)
assert o.info.tuple_type
o.info.tuple_type.accept(self)
def visit__promote_expr(self, o: PromoteExpr, /) -> None:
super().visit__promote_expr(o)
o.type.accept(self)
def visit_newtype_expr(self, o: NewTypeExpr, /) -> None:
super().visit_newtype_expr(o)
self.visit_optional_type(o.old_type)
# Statements
def visit_assignment_stmt(self, o: AssignmentStmt, /) -> None:
super().visit_assignment_stmt(o)
self.visit_optional_type(o.type)
def visit_type_alias_stmt(self, o: TypeAliasStmt, /) -> None:
super().visit_type_alias_stmt(o)
if o.alias_node is not None:
o.alias_node.accept(self)
def visit_type_alias(self, o: TypeAlias, /) -> None:
super().visit_type_alias(o)
self.in_type_alias_expr = True
o.target.accept(self)
self.in_type_alias_expr = False
def visit_for_stmt(self, o: ForStmt, /) -> None:
super().visit_for_stmt(o)
self.visit_optional_type(o.index_type)
def visit_with_stmt(self, o: WithStmt, /) -> None:
super().visit_with_stmt(o)
for typ in o.analyzed_types:
typ.accept(self)
# Expressions
def visit_cast_expr(self, o: CastExpr, /) -> None:
super().visit_cast_expr(o)
o.type.accept(self)
def visit_type_form_expr(self, o: TypeFormExpr, /) -> None:
super().visit_type_form_expr(o)
o.type.accept(self)
def visit_assert_type_expr(self, o: AssertTypeExpr, /) -> None:
super().visit_assert_type_expr(o)
o.type.accept(self)
def visit_type_application(self, o: TypeApplication, /) -> None:
super().visit_type_application(o)
for t in o.types:
t.accept(self)
# Helpers
def visit_optional_type(self, t: Type | None, /) -> None:
if t:
t.accept(self)
| MixedTraverserVisitor |
python | apache__airflow | devel-common/src/sphinx_exts/providers_commits.py | {
"start": 1544,
"end": 8742
} | class ____(NamedTuple):
"""Stores details about commits"""
full_hash: str
short_hash: str
date: str
version: str
message: str
message_without_backticks: str
pr: str | None
def get_provider_root_path(provider_id: str) -> Path:
return Path("providers") / provider_id.replace(".", "/")
def _get_version_tag(version: str, provider_id: str):
return f"providers-{provider_id.replace('.', '-')}/{version}"
def _get_possible_old_provider_paths(provider_id: str) -> list[Path]:
# This is used to get historical commits for the provider
paths: list[Path] = [
AIRFLOW_ORIGINAL_PROVIDERS_DIR.joinpath(*provider_id.split(".")).relative_to(AIRFLOW_ROOT_PATH),
PREVIOUS_AIRFLOW_PROVIDERS_NS_PACKAGE_PATH.joinpath(*provider_id.split(".")).relative_to(
AIRFLOW_ROOT_PATH
),
(DOCS_ROOT / f"apache-airflow-providers-{provider_id.replace('.', '-')}").relative_to(
AIRFLOW_ROOT_PATH
),
]
if provider_id == "edge3":
paths.append(get_provider_root_path("edge"))
paths.append(get_provider_root_path("edgeexecutor"))
return paths
def _get_git_log_command(
folder_paths: list[Path] | None = None, from_commit: str | None = None, to_commit: str | None = None
) -> list[str]:
"""Get git command to run for the current repo from the current folder.
The current directory should always be the package folder.
:param folder_paths: list of folder paths to check for changes
:param from_commit: if present - base commit from which to start the log from
:param to_commit: if present - final commit which should be the start of the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
elif to_commit:
raise ValueError("It makes no sense to specify to_commit without from_commit.")
folders = [folder_path.as_posix() for folder_path in folder_paths] if folder_paths else ["."]
git_cmd.extend(["--", *folders])
return git_cmd
def _get_change_from_line(line: str, version: str) -> Change:
split_line = line.split(" ", maxsplit=3)
message = split_line[3]
pr = None
pr_match = PR_PATTERN.match(message)
if pr_match:
pr = pr_match.group(1)
return Change(
full_hash=split_line[0],
short_hash=split_line[1],
date=split_line[2],
version=version,
message=message,
message_without_backticks=message.replace("`", "'").replace("&39;", "'"),
pr=pr,
)
def _convert_git_changes_to_table(
version: str, changes: str, base_url: str, markdown: bool = True
) -> tuple[str, list[Change]]:
"""
Converts list of changes from its string form to markdown/RST table and array of change information
The changes are in the form of multiple lines where each line consists of:
FULL_COMMIT_HASH SHORT_COMMIT_HASH COMMIT_DATE COMMIT_SUBJECT
The subject can contain spaces but one of the preceding values can, so we can make split
3 times on spaces to break it up.
:param version: Version from which the changes are
:param changes: list of changes in a form of multiple-line string
:param base_url: base url for the commit URL
:param markdown: if True, Markdown format is used else rst
:return: formatted table + list of changes (starting from the latest)
"""
from tabulate import tabulate
lines = changes.splitlines()
headers = ["Commit", "Committed", "Subject"]
table_data = []
changes_list: list[Change] = []
for line in lines:
if line == "":
continue
change = _get_change_from_line(line, version)
table_data.append(
(
f"[{change.short_hash}]({base_url}{change.full_hash})"
if markdown
else f"`{change.short_hash} <{base_url}{change.full_hash}>`__",
change.date,
f"`{change.message_without_backticks}`"
if markdown
else f"``{change.message_without_backticks}``",
)
)
changes_list.append(change)
header = ""
if not table_data:
return header, []
table = tabulate(table_data, headers=headers, tablefmt="pipe" if markdown else "rst")
if not markdown:
header += f"\n\n{version}\n" + "." * len(version) + "\n\n"
release_date = table_data[0][1]
header += f"Latest change: {release_date}\n\n"
return header + table, changes_list
def _get_all_changes_for_package_as_rst(
provider_id: str,
) -> str:
provider_root_path = AIRFLOW_PROVIDERS_ROOT_PATH / provider_id.replace(".", "/")
provider_yaml_file = provider_root_path / "provider.yaml"
provider_yaml_dict = yaml.safe_load(provider_yaml_file.read_text())
providers_folder_paths_for_git_commit_retrieval = [
get_provider_root_path(provider_id),
*_get_possible_old_provider_paths(provider_id),
]
changes_table = ""
current_version = provider_yaml_dict["versions"][0]
next_version_tag = _get_version_tag(current_version, provider_id)
result = run_command(["git", "rev-parse", next_version_tag], check=False)
if result.returncode != 0:
next_version_tag = "HEAD"
for version in provider_yaml_dict["versions"][1:]:
version_tag = _get_version_tag(version, provider_id)
log_command = _get_git_log_command(
providers_folder_paths_for_git_commit_retrieval, next_version_tag, version_tag
)
result = run_command(log_command)
changes = result.stdout.strip()
changes_table_for_version, array_of_changes_for_version = _convert_git_changes_to_table(
current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
)
changes_table += changes_table_for_version
next_version_tag = version_tag
current_version = version
log_command = _get_git_log_command(providers_folder_paths_for_git_commit_retrieval, next_version_tag)
result = run_command(log_command)
changes = result.stdout.strip()
changes_table_for_version, array_of_changes_for_version = _convert_git_changes_to_table(
current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
)
changes_table += changes_table_for_version
return changes_table
def run_command(log_command: list[str], check: bool = True) -> subprocess.CompletedProcess:
result = subprocess.run(
log_command,
cwd=AIRFLOW_ROOT_PATH,
capture_output=True,
text=True,
check=False,
)
if result.returncode != 0:
quoted_command = " ".join([shlex.quote(c) for c in log_command])
print(f"ERROR!!! Failed to run git command: `{quoted_command}`\n")
print(result.stdout)
print(result.stderr)
if check:
raise RuntimeError("Failed to run git log command")
return result
| Change |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_vector/generate/executor.py | {
"start": 1013,
"end": 19958
} | class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[Properties, References]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[Properties, CrossReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[Properties, TReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[TProperties, References]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[TProperties, CrossReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[TProperties, TReferences]]: ...
### GroupBy ###
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[Properties, References]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[Properties, CrossReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[Properties, TReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[TProperties, References]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[TProperties, CrossReferences]]: ...
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[TProperties, TReferences]]: ...
### DEFAULT ###
@overload
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]: ...
def near_vector(
self,
near_vector: NearVectorInputType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]:
"""Perform retrieval-augmented generation (RaG) on the results of a by-vector object search in this collection using vector-based similarity search.
See the [docs](https://weaviate.io/developers/weaviate/search/similarity) for a more detailed explanation.
Args:
near_vector: The vector to search on, REQUIRED. This can be a base64 encoded string of the binary, a path to the file, or a file-like object.
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied.
filters: The filters to apply to the search.
group_by: How the results should be grouped by a specific property.
rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work.
target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
return_references: The references to return for each object.
NOTE:
- If `return_properties` is not provided then all properties are returned except for blob properties.
- If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
- If `return_references` is not provided then no references are provided.
Returns:
A `GenerativeReturn` or `GenerativeGroupByReturn` object that includes the searched objects.
If `group_by` is provided then a `GenerativeGroupByReturn` object is returned, otherwise a `GenerativeReturn` object is returned.
Raises:
weaviate.exceptions.WeaviateGRPCQueryError: If the request to the Weaviate server fails.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> GenerativeSearchReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_generative_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
rerank,
group_by,
),
),
)
request = self._query.near_vector(
near_vector=near_vector,
certainty=certainty,
distance=distance,
filters=filters,
group_by=_GroupBy.from_input(group_by),
generative=_Generative(
single=single_prompt,
grouped=grouped_task,
grouped_properties=grouped_properties,
generative_provider=generative_provider,
),
limit=limit,
offset=offset,
autocut=auto_limit,
rerank=rerank,
target_vector=target_vector,
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(return_references),
)
return executor.execute(
response_callback=resp,
method=self._connection.grpc_search,
request=request,
)
| _NearVectorGenerateExecutor |
python | python__mypy | mypy/nodes.py | {
"start": 100165,
"end": 100539
} | class ____(Expression):
"""Await expression (await ...)."""
__slots__ = ("expr",)
__match_args__ = ("expr",)
expr: Expression
def __init__(self, expr: Expression) -> None:
super().__init__()
self.expr = expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_await_expr(self)
# Constants
| AwaitExpr |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 3917,
"end": 4324
} | class ____:
def setup(self):
n, k = 200, 5000
levels = [
np.arange(n),
Index([f"i-{i}" for i in range(n)], dtype=object).values,
1000 + np.arange(n),
]
codes = [np.random.choice(n, (k * n)) for lev in levels]
self.mi = MultiIndex(levels=levels, codes=codes)
def time_duplicated(self):
self.mi.duplicated()
| Duplicated |
python | pytorch__pytorch | torch/optim/optimizer.py | {
"start": 13661,
"end": 51029
} | class ____:
r"""Base class for all optimizers.
.. warning::
Parameters need to be specified as collections that have a deterministic
ordering that is consistent between runs. Examples of objects that don't
satisfy those properties are sets and iterators over values of dictionaries.
Args:
params (iterable): an iterable of :class:`torch.Tensor` s or
:class:`dict` s. Specifies what Tensors should be optimized.
defaults: (dict): a dict containing default values of optimization
options (used when a parameter group doesn't specify them).
"""
OptimizerPreHook: TypeAlias = Callable[
[Self, Args, Kwargs], # type: ignore[misc]
Optional[tuple[Args, Kwargs]],
]
OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc]
_optimizer_step_pre_hooks: dict[int, OptimizerPreHook]
_optimizer_step_post_hooks: dict[int, OptimizerPostHook]
# pyrefly: ignore [not-a-type]
_optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'
_optimizer_state_dict_post_hooks: (
# pyrefly: ignore [not-a-type]
'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
)
_optimizer_load_state_dict_pre_hooks: (
# pyrefly: ignore [not-a-type]
'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
)
_optimizer_load_state_dict_post_hooks: (
# pyrefly: ignore [not-a-type]
'OrderedDict[int, Callable[["Optimizer"], None]]'
)
def __init__(self, params: ParamsT, defaults: dict[str, Any]) -> None: # noqa: D107
torch._C._log_api_usage_once("python.optimizer")
self.defaults = defaults
self._optimizer_step_pre_hooks = OrderedDict()
self._optimizer_step_post_hooks = OrderedDict()
self._optimizer_state_dict_pre_hooks = OrderedDict()
self._optimizer_state_dict_post_hooks = OrderedDict()
self._optimizer_load_state_dict_pre_hooks = OrderedDict()
self._optimizer_load_state_dict_post_hooks = OrderedDict()
self._patch_step_function()
if isinstance(params, torch.Tensor):
raise TypeError(
"params argument given to the optimizer should be "
"an iterable of Tensors or dicts, but got " + torch.typename(params)
)
self.state: defaultdict[torch.Tensor, Any] = defaultdict(dict)
self.param_groups: list[dict[str, Any]] = []
param_groups = list(params)
if len(param_groups) == 0:
raise ValueError("optimizer got an empty parameter list")
if not isinstance(param_groups[0], dict):
param_groups = [{"params": param_groups}]
for param_group in param_groups:
self.add_param_group(cast(dict, param_group))
# Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python,
# which I don't think exists
# https://github.com/pytorch/pytorch/issues/72948
self._warned_capturable_if_run_uncaptured = True
def __getstate__(self) -> dict[str, Any]: # noqa: D105
return {
"defaults": self.defaults,
"state": self.state,
"param_groups": self.param_groups,
}
def __setstate__(self, state: dict[str, Any]) -> None: # noqa: D105
self.__dict__.update(state)
if "_optimizer_step_pre_hooks" not in self.__dict__:
self._optimizer_step_pre_hooks = OrderedDict()
if "_optimizer_step_post_hooks" not in self.__dict__:
self._optimizer_step_post_hooks = OrderedDict()
if "_optimizer_state_dict_pre_hooks" not in self.__dict__:
self._optimizer_state_dict_pre_hooks = OrderedDict()
if "_optimizer_state_dict_post_hooks" not in self.__dict__:
self._optimizer_state_dict_post_hooks = OrderedDict()
if "_optimizer_load_state_dict_pre_hooks" not in self.__dict__:
self._optimizer_load_state_dict_pre_hooks = OrderedDict()
if "_optimizer_load_state_dict_post_hooks" not in self.__dict__:
self._optimizer_load_state_dict_post_hooks = OrderedDict()
self._patch_step_function() # To support multiprocessing pickle/unpickle
self.defaults.setdefault("differentiable", False)
def __repr__(self) -> str: # noqa: D105
format_string = self.__class__.__name__ + " ("
for i, group in enumerate(self.param_groups):
format_string += "\n"
format_string += f"Parameter Group {i}\n"
for key in sorted(group.keys()):
if key != "params":
format_string += f" {key}: {group[key]}\n"
format_string += ")"
return format_string
# Currently needed by Adam and AdamW
def _cuda_graph_capture_health_check(self) -> None:
# Note [torch.compile x capturable]
# If we are compiling, we try to take the capturable path automatically by
# setting the flag to True during tracing. Due to this, we skip all the checks
# normally required for determining whether we can use CUDA graphs and
# shunt the responsibility to torch.inductor. This saves time during tracing
# since the checks are slow without sacrificing UX since inductor will warn
# later if CUDA graphs cannot be enabled, e.g.,
# https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390.
# Thus, when compiling, inductor will determine if cudagraphs
# can be enabled based on whether there is input mutation or CPU tensors.
if (
not torch.compiler.is_compiling()
and torch.backends.cuda.is_built()
and torch.cuda.is_available()
):
capturing = torch.cuda.is_current_stream_capturing()
if capturing and not all(
group["capturable"] for group in self.param_groups
):
raise RuntimeError(
"Attempting CUDA graph capture of step() for an instance of "
+ self.__class__.__name__
+ " but param_groups' capturable is False."
)
if (
(not getattr(self, "_warned_capturable_if_run_uncaptured", False))
and all(group["capturable"] for group in self.param_groups)
and (not capturing)
):
warnings.warn(
"This instance was constructed with capturable=True or some of all the param_groups came with capturable=True, "
"but step() is running without CUDA graph capture. If you never intend to graph-capture this "
"instance, capturable=True can impair performance, and you should set capturable=False.",
stacklevel=2,
)
self._warned_capturable_if_run_uncaptured = True
def _optimizer_step_code(self) -> None:
"""Entry point for `torch.profile.profiler`.
When python tracing is enabled the profiler will hook into this
function at the CPython level to inspect the optimizer's parameters and
param groups. It is called it after `step()` since many optimizers
lazily initialize state.
This is a workaround due to lack of a proper step hook on the optimizer,
and will be removed if it exists.
"""
@staticmethod
def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]: # noqa: D102
@functools.wraps(func)
def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R:
self, *_ = args
self = cast(Optimizer, self)
profile_name = f"Optimizer.step#{self.__class__.__name__}.step"
with torch.autograd.profiler.record_function(profile_name):
# call optimizer step pre hooks
for pre_hook in chain(
_global_optimizer_pre_hooks.values(),
self._optimizer_step_pre_hooks.values(),
):
result = pre_hook(self, args, kwargs)
if result is not None:
if isinstance(result, tuple) and len(result) == 2:
args, kwargs = result # type: ignore[assignment]
else:
raise RuntimeError(
f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}."
)
# pyrefly: ignore [invalid-param-spec]
out = func(*args, **kwargs)
self._optimizer_step_code()
# call optimizer step post hooks
for post_hook in chain(
self._optimizer_step_post_hooks.values(),
_global_optimizer_post_hooks.values(),
):
post_hook(self, args, kwargs)
return out
return wrapper
@staticmethod
def _group_tensors_by_device_and_dtype(
tensorlistlist: TensorListList,
with_indices: bool = False,
) -> Union[
dict[tuple[None, None], tuple[TensorListList, Indices]],
dict[tuple[torch.device, torch.dtype], tuple[TensorListList, Indices]],
]:
"""Group a list of lists of tensors by device and dtype.
Skips this step if we are compiling since this will occur during inductor lowering.
"""
if torch.compiler.is_compiling():
return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}
else:
return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) # type: ignore[return-value, arg-type]
def _patch_step_function(self) -> None:
self._zero_grad_profile_name = (
f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad"
)
hooked = getattr(self.__class__.step, "hooked", None)
if not hooked:
self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment]
self.__class__.step.hooked = True # type: ignore[attr-defined]
def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle:
r"""Register an optimizer step pre hook which will be called before optimizer step.
It should have the following signature::
hook(optimizer, args, kwargs) -> None or modified args and kwargs
The ``optimizer`` argument is the optimizer instance being used. If
args and kwargs are modified by the pre-hook, then the transformed
values are returned as a tuple containing the new_args and new_kwargs.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks)
self._optimizer_step_pre_hooks[handle.id] = hook
return handle
def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle:
r"""Register an optimizer step post hook which will be called after optimizer step.
It should have the following signature::
hook(optimizer, args, kwargs) -> None
The ``optimizer`` argument is the optimizer instance being used.
Args:
hook (Callable): The user defined hook to be registered.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_step_post_hooks)
self._optimizer_step_post_hooks[handle.id] = hook
return handle
def register_state_dict_pre_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle: # noqa: D101
r"""Register a state dict pre-hook which will be called before :meth:`~torch.optim.Optimizer.state_dict` is called.
It should have the following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``.
The registered hook can be used to perform pre-processing before the ``state_dict``
call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks)
self._optimizer_state_dict_pre_hooks[handle.id] = hook
if prepend:
self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
def register_state_dict_post_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
prepend: bool = False,
) -> RemovableHandle:
r"""Register a state dict post-hook which will be called after :meth:`~torch.optim.Optimizer.state_dict` is called.
It should have the following signature::
hook(optimizer, state_dict) -> state_dict or None
The hook will be called with arguments ``self`` and ``state_dict`` after generating
a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally
return a new one. The registered hook can be used to perform post-processing
on the ``state_dict`` before it is returned.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks)
self._optimizer_state_dict_post_hooks[handle.id] = hook
if prepend:
self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False)
return handle
@torch._disable_dynamo
def state_dict(self) -> StateDict:
r"""Return the state of the optimizer as a :class:`dict`.
It contains two entries:
* ``state``: a Dict holding current optimization state. Its content
differs between optimizer classes, but some common characteristics
hold. For example, state is saved per parameter, and the parameter
itself is NOT saved. ``state`` is a Dictionary mapping parameter ids
to a Dict with state corresponding to each parameter.
* ``param_groups``: a List containing all parameter groups where each
parameter group is a Dict. Each parameter group contains metadata
specific to the optimizer, such as learning rate and weight decay,
as well as a List of parameter IDs of the parameters in the group.
If a param group was initialized with ``named_parameters()`` the names
content will also be saved in the state dict.
NOTE: The parameter IDs may look like indices but they are just IDs
associating state with param_group. When loading from a state_dict,
the optimizer will zip the param_group ``params`` (int IDs) and the
optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to
match state WITHOUT additional verification.
A returned state dict might look something like:
.. code-block:: text
{
'state': {
0: {'momentum_buffer': tensor(...), ...},
1: {'momentum_buffer': tensor(...), ...},
2: {'momentum_buffer': tensor(...), ...},
3: {'momentum_buffer': tensor(...), ...}
},
'param_groups': [
{
'lr': 0.01,
'weight_decay': 0,
...
'params': [0]
'param_names' ['param0'] (optional)
},
{
'lr': 0.001,
'weight_decay': 0.5,
...
'params': [1, 2, 3]
'param_names': ['param1', 'layer.weight', 'layer.bias'] (optional)
}
]
}
"""
for pre_hook in self._optimizer_state_dict_pre_hooks.values():
pre_hook(self)
# Save order indices instead of Tensors
param_mappings: dict[int, int] = {}
start_index = 0
def pack_group(group: dict[str, Any]) -> dict[str, Any]:
nonlocal start_index
packed = {k: v for k, v in group.items() if k != "params"}
param_mappings.update(
{
id(p): i
for i, p in enumerate(group["params"], start_index)
if id(p) not in param_mappings
}
)
packed["params"] = [param_mappings[id(p)] for p in group["params"]]
start_index += len(packed["params"])
return packed
param_groups = [pack_group(g) for g in self.param_groups]
# Remap state to use order indices as keys
packed_state = {
(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
for k, v in self.state.items()
}
state_dict = {
"state": packed_state,
"param_groups": param_groups,
}
for post_hook in self._optimizer_state_dict_post_hooks.values():
hook_result = post_hook(self, state_dict)
if hook_result is not None:
state_dict = hook_result
return state_dict
@staticmethod
def _process_value_according_to_param_policy(
param: torch.Tensor,
value: torch.Tensor,
param_id: int,
param_groups: list[dict[Any, Any]],
key: Hashable = None,
) -> torch.Tensor:
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
# Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424
# UNLESS fused or capturable, see note [special device hosting for step]
fused = False
capturable = False
if param_groups is None:
raise AssertionError("Expected param_groups to be set")
for pg in param_groups:
if param_id in pg["params"]:
fused = pg.get("fused", False)
capturable = pg.get("capturable", False)
break
if key == "step":
if capturable or fused:
return value.to(dtype=torch.float32, device=param.device)
else:
return value
else:
if param.is_floating_point():
return value.to(dtype=param.dtype, device=param.device)
else:
return value.to(device=param.device)
def register_load_state_dict_pre_hook(
self,
hook: Callable[["Optimizer", StateDict], Optional[StateDict]],
prepend: bool = False,
) -> RemovableHandle: # noqa: D205 D400
r"""Register a load_state_dict pre-hook which will be called before
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer, state_dict) -> state_dict or None
The ``optimizer`` argument is the optimizer instance being used and the
``state_dict`` argument is a shallow copy of the ``state_dict`` the user
passed in to ``load_state_dict``. The hook may modify the state_dict inplace
or optionally return a new one. If a state_dict is returned, it will be used
to be loaded into the optimizer.
The hook will be called with argument ``self`` and ``state_dict`` before
calling ``load_state_dict`` on ``self``. The registered hook can be used to
perform pre-processing before the ``load_state_dict`` call is made.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided pre ``hook`` will be fired before
all the already registered pre-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
pre-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)
self._optimizer_load_state_dict_pre_hooks[handle.id] = hook
if prepend:
self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
return handle
def register_load_state_dict_post_hook(
self, hook: Callable[["Optimizer"], None], prepend: bool = False
) -> RemovableHandle: # noqa: D205 D400
r"""Register a load_state_dict post-hook which will be called after
:meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
following signature::
hook(optimizer) -> None
The ``optimizer`` argument is the optimizer instance being used.
The hook will be called with argument ``self`` after calling
``load_state_dict`` on ``self``. The registered hook can be used to
perform post-processing after ``load_state_dict`` has loaded the
``state_dict``.
Args:
hook (Callable): The user defined hook to be registered.
prepend (bool): If True, the provided post ``hook`` will be fired before
all the already registered post-hooks on ``load_state_dict``. Otherwise,
the provided ``hook`` will be fired after all the already registered
post-hooks. (default: False)
Returns:
:class:`torch.utils.hooks.RemoveableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)
self._optimizer_load_state_dict_post_hooks[handle.id] = hook
if prepend:
self._optimizer_load_state_dict_post_hooks.move_to_end(
handle.id, last=False
) # type: ignore[attr-defined]
return handle
@torch._disable_dynamo
def load_state_dict(self, state_dict: StateDict) -> None:
r"""Load the optimizer state.
Args:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
.. warning::
Make sure this method is called after initializing :class:`torch.optim.lr_scheduler.LRScheduler`,
as calling it beforehand will overwrite the loaded learning rates.
.. note::
The names of the parameters (if they exist under the "param_names" key of each param group
in :meth:`state_dict`) will not affect the loading process.
To use the parameters' names for custom cases (such as when the parameters in the loaded state dict
differ from those initialized in the optimizer),
a custom ``register_load_state_dict_pre_hook`` should be implemented to adapt the loaded dict
accordingly.
If ``param_names`` exist in loaded state dict ``param_groups`` they will be saved and override
the current names, if present, in the optimizer state. If they do not exist in loaded state dict,
the optimizer ``param_names`` will remain unchanged.
Example:
>>> # xdoctest: +SKIP
>>> model = torch.nn.Linear(10, 10)
>>> optim = torch.optim.SGD(model.parameters(), lr=3e-4)
>>> scheduler1 = torch.optim.lr_scheduler.LinearLR(
... optim,
... start_factor=0.1,
... end_factor=1,
... total_iters=20,
... )
>>> scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(
... optim,
... T_max=80,
... eta_min=3e-5,
... )
>>> lr = torch.optim.lr_scheduler.SequentialLR(
... optim,
... schedulers=[scheduler1, scheduler2],
... milestones=[20],
... )
>>> lr.load_state_dict(torch.load("./save_seq.pt"))
>>> # now load the optimizer checkpoint after loading the LRScheduler
>>> optim.load_state_dict(torch.load("./save_optim.pt"))
"""
# shallow copy, to be consistent with module API
state_dict = state_dict.copy()
for pre_hook in self._optimizer_load_state_dict_pre_hooks.values():
hook_result = pre_hook(self, state_dict)
if hook_result is not None:
state_dict = hook_result
# Validate the state_dict
groups = self.param_groups
# Deepcopy as we write into saved_groups later to update state
saved_groups = deepcopy(state_dict["param_groups"])
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of parameter groups"
)
param_lens = (len(g["params"]) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(
p_len != s_len for p_len, s_len in zip(param_lens, saved_lens, strict=True)
):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
# Update the state
id_map = dict(
zip(
chain.from_iterable(g["params"] for g in saved_groups),
chain.from_iterable(g["params"] for g in groups),
strict=True,
)
)
def _cast(param, value, param_id=None, param_groups=None, key=None):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
return Optimizer._process_value_according_to_param_policy(
param,
value,
# pyrefly: ignore [bad-argument-type]
param_id,
# pyrefly: ignore [bad-argument-type]
param_groups,
key,
)
elif isinstance(value, dict):
return {
k: _cast(
param, v, param_id=param_id, param_groups=param_groups, key=k
)
for k, v in value.items()
}
elif isinstance(value, Iterable):
return type(value)(
# pyrefly: ignore [bad-argument-count]
_cast(param, v, param_id=param_id, param_groups=param_groups)
for v in value
) # type: ignore[call-arg]
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state: defaultdict[torch.Tensor, dict[Any, Any]] = defaultdict(dict)
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = _cast(
param, v, param_id=k, param_groups=state_dict["param_groups"]
)
else:
state[k] = v
# Update parameter groups, setting their 'params' value
def update_group(
group: dict[str, Any], new_group: dict[str, Any]
) -> dict[str, Any]:
new_group["params"] = group["params"]
if "param_names" in group and "param_names" not in new_group:
new_group["param_names"] = group["param_names"]
return new_group
param_groups = [
update_group(g, ng) for g, ng in zip(groups, saved_groups, strict=True)
]
self.__setstate__({"state": state, "param_groups": param_groups})
for post_hook in self._optimizer_load_state_dict_post_hooks.values():
post_hook(self)
@torch._disable_dynamo
def zero_grad(self, set_to_none: bool = True) -> None:
r"""Reset the gradients of all optimized :class:`torch.Tensor` s.
Args:
set_to_none (bool, optional): Instead of setting to zero, set the grads to None. Default: ``True``
This will in general have lower memory footprint, and can modestly improve performance.
However, it changes certain behaviors. For example:
1. When the user tries to access a gradient and perform manual ops on it,
a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
are guaranteed to be None for params that did not receive a gradient.
3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
"""
foreach = self.defaults.get("foreach", False) or self.defaults.get(
"fused", False
)
if not hasattr(self, "_zero_grad_profile_name"):
self._patch_step_function()
per_device_and_dtype_grads: Optional[
defaultdict[torch.device, defaultdict[torch.dtype, list[torch.Tensor]]]
]
if foreach:
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
else:
per_device_and_dtype_grads = None
with torch.autograd.profiler.record_function(self._zero_grad_profile_name):
for group in self.param_groups:
for p in group["params"]:
if p.grad is not None:
if set_to_none:
p.grad = None
else:
if p.grad.grad_fn is not None:
p.grad.detach_()
else:
p.grad.requires_grad_(False)
if not foreach or p.grad.is_sparse:
p.grad.zero_()
else:
if per_device_and_dtype_grads is None:
raise AssertionError(
"Expected per_device_and_dtype_grads to be set"
)
per_device_and_dtype_grads[p.grad.device][
p.grad.dtype
].append(p.grad)
if foreach:
if per_device_and_dtype_grads is None:
raise AssertionError(
"Expected per_device_and_dtype_grads to be set"
)
for per_dtype_grads in per_device_and_dtype_grads.values():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
@overload
def step(self, closure: None = None) -> None: ...
@overload
def step(self, closure: Callable[[], float]) -> float: ...
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
r"""Perform a single optimization step to update parameter.
Args:
closure (Callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
"""
raise NotImplementedError
@torch._disable_dynamo
def add_param_group(self, param_group: dict[str, Any]) -> None:
r"""Add a param group to the :class:`Optimizer` s `param_groups`.
This can be useful when fine tuning a pre-trained network as frozen layers can be made
trainable and added to the :class:`Optimizer` as training progresses.
Args:
param_group (dict): Specifies what Tensors should be optimized along with group
specific optimization options.
"""
if not isinstance(param_group, dict):
raise TypeError(f"param_group must be a dict, but got {type(param_group)}")
params = param_group["params"]
if isinstance(params, torch.Tensor):
param_group["params"] = [params]
elif isinstance(params, set):
raise TypeError(
"optimizer parameters need to be organized in ordered collections, but "
"the ordering of tensors in sets will change between runs. Please use a list instead."
)
else:
param_group["params"] = list(params)
extracted_param_tensors = []
extracted_param_names = []
for param in param_group["params"]:
if isinstance(param, tuple):
param_name = param[0]
extracted_param_names.append(param_name)
extracted_param_tensors.append(param[1])
else:
extracted_param_tensors.append(param)
param_group["params"] = extracted_param_tensors
if len(extracted_param_names) != 0:
if len(extracted_param_names) == len(extracted_param_tensors):
param_group["param_names"] = extracted_param_names
else:
raise ValueError(
"all optimizer params should be with/without names. Some param names are missing"
)
for param in param_group["params"]:
if not isinstance(param, torch.Tensor):
raise TypeError(
"optimizer can only optimize Tensors, "
"but one of the params is " + torch.typename(param)
)
if not self.defaults.get("differentiable", None) and not (
param.is_leaf or param.retains_grad
):
raise ValueError("can't optimize a non-leaf Tensor")
for name, default in self.defaults.items():
if default is required and name not in param_group:
raise ValueError(
f"parameter group didn't specify a value of required optimization parameter {name}"
)
else:
param_group.setdefault(name, default)
params = param_group["params"]
if len(params) != len(set(params)):
warnings.warn(
"optimizer contains a parameter group with duplicate parameters; "
"in future, this will cause an error; "
"see github.com/pytorch/pytorch/issues/40967 for more information",
stacklevel=3,
)
param_set: set[torch.Tensor] = set()
for group in self.param_groups:
param_set.update(set(group["params"]))
if ("param_names" in param_group) != ("param_names" in group):
current_group_txt = (
"with names" if "param_names" in param_group else "without names"
)
raise ValueError(
"all optimizer param groups should be with/without names. "
f"cannot add param group {current_group_txt} to the optimizer"
)
if not param_set.isdisjoint(set(param_group["params"])):
raise ValueError("some parameters appear in more than one parameter group")
self.param_groups.append(param_group)
| Optimizer |
python | langchain-ai__langchain | libs/core/langchain_core/_api/beta_decorator.py | {
"start": 596,
"end": 8664
} | class ____(DeprecationWarning):
"""A class for issuing beta warnings for LangChain users."""
# PUBLIC API
T = TypeVar("T", bound=Callable[..., Any] | type)
def beta(
*,
message: str = "",
name: str = "",
obj_type: str = "",
addendum: str = "",
) -> Callable[[T], T]:
"""Decorator to mark a function, a class, or a property as beta.
When marking a classmethod, a staticmethod, or a property, the
`@beta` decorator should go *under* `@classmethod` and
`@staticmethod` (i.e., `beta` should directly decorate the
underlying callable), but *over* `@property`.
When marking a class `C` intended to be used as a base class in a
multiple inheritance hierarchy, `C` *must* define an `__init__` method
(if `C` instead inherited its `__init__` from its own base class, then
`@beta` would mess up `__init__` inheritance when installing its
own (annotation-emitting) `C.__init__`).
Args:
message:
Override the default beta message. The %(since)s,
%(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
and %(removal)s format specifiers will be replaced by the
values of the respective arguments passed to this function.
name:
The name of the beta object.
obj_type:
The object type being beta.
addendum:
Additional text appended directly to the final message.
Returns:
A decorator which can be used to mark functions or classes as beta.
```python
@beta
def the_function_to_annotate():
pass
```
"""
def beta(
obj: T,
*,
_obj_type: str = obj_type,
_name: str = name,
_message: str = message,
_addendum: str = addendum,
) -> T:
"""Implementation of the decorator returned by `beta`."""
def emit_warning() -> None:
"""Emit the warning."""
warn_beta(
message=_message,
name=_name,
obj_type=_obj_type,
addendum=_addendum,
)
warned = False
def warning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any:
"""Wrapper for the original wrapped callable that emits a warning.
Args:
*args: The positional arguments to the function.
**kwargs: The keyword arguments to the function.
Returns:
The return value of the function being wrapped.
"""
nonlocal warned
if not warned and not is_caller_internal():
warned = True
emit_warning()
return wrapped(*args, **kwargs)
async def awarning_emitting_wrapper(*args: Any, **kwargs: Any) -> Any:
"""Same as warning_emitting_wrapper, but for async functions."""
nonlocal warned
if not warned and not is_caller_internal():
warned = True
emit_warning()
return await wrapped(*args, **kwargs)
if isinstance(obj, type):
if not _obj_type:
_obj_type = "class"
wrapped = obj.__init__ # type: ignore[misc]
_name = _name or obj.__qualname__
old_doc = obj.__doc__
def finalize(wrapper: Callable[..., Any], new_doc: str) -> T: # noqa: ARG001
"""Finalize the annotation of a class."""
# Can't set new_doc on some extension objects.
with contextlib.suppress(AttributeError):
obj.__doc__ = new_doc
def warn_if_direct_instance(
self: Any, *args: Any, **kwargs: Any
) -> Any:
"""Warn that the class is in beta."""
nonlocal warned
if not warned and type(self) is obj and not is_caller_internal():
warned = True
emit_warning()
return wrapped(self, *args, **kwargs)
obj.__init__ = functools.wraps(obj.__init__)( # type: ignore[misc]
warn_if_direct_instance
)
return obj
elif isinstance(obj, property):
if not _obj_type:
_obj_type = "attribute"
wrapped = None
_name = _name or obj.fget.__qualname__
old_doc = obj.__doc__
def _fget(instance: Any) -> Any:
if instance is not None:
emit_warning()
return obj.fget(instance)
def _fset(instance: Any, value: Any) -> None:
if instance is not None:
emit_warning()
obj.fset(instance, value)
def _fdel(instance: Any) -> None:
if instance is not None:
emit_warning()
obj.fdel(instance)
def finalize(_wrapper: Callable[..., Any], new_doc: str) -> Any:
"""Finalize the property."""
return property(fget=_fget, fset=_fset, fdel=_fdel, doc=new_doc)
else:
_name = _name or obj.__qualname__
if not _obj_type:
# edge case: when a function is within another function
# within a test, this will call it a "method" not a "function"
_obj_type = "function" if "." not in _name else "method"
wrapped = obj
old_doc = wrapped.__doc__
def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
"""Wrap the wrapped function using the wrapper and update the docstring.
Args:
wrapper: The wrapper function.
new_doc: The new docstring.
Returns:
The wrapped function.
"""
wrapper = functools.wraps(wrapped)(wrapper)
wrapper.__doc__ = new_doc
return cast("T", wrapper)
old_doc = inspect.cleandoc(old_doc or "").strip("\n") or ""
components = [message, addendum]
details = " ".join([component.strip() for component in components if component])
new_doc = f".. beta::\n {details}\n\n{old_doc}\n"
if inspect.iscoroutinefunction(obj):
return finalize(awarning_emitting_wrapper, new_doc)
return finalize(warning_emitting_wrapper, new_doc)
return beta
@contextlib.contextmanager
def suppress_langchain_beta_warning() -> Generator[None, None, None]:
"""Context manager to suppress LangChainDeprecationWarning."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", LangChainBetaWarning)
yield
def warn_beta(
*,
message: str = "",
name: str = "",
obj_type: str = "",
addendum: str = "",
) -> None:
"""Display a standardized beta annotation.
Args:
message:
Override the default beta message. The
%(name)s, %(obj_type)s, %(addendum)s
format specifiers will be replaced by the
values of the respective arguments passed to this function.
name:
The name of the annotated object.
obj_type:
The object type being annotated.
addendum:
Additional text appended directly to the final message.
"""
if not message:
message = ""
if obj_type:
message += f"The {obj_type} `{name}`"
else:
message += f"`{name}`"
message += " is in beta. It is actively being worked on, so the API may change."
if addendum:
message += f" {addendum}"
warning = LangChainBetaWarning(message)
warnings.warn(warning, category=LangChainBetaWarning, stacklevel=4)
def surface_langchain_beta_warnings() -> None:
"""Unmute LangChain beta warnings."""
warnings.filterwarnings(
"default",
category=LangChainBetaWarning,
)
| LangChainBetaWarning |
python | huggingface__transformers | src/transformers/models/doge/modular_doge.py | {
"start": 34129,
"end": 34345
} | class ____(LlamaForSequenceClassification):
pass
__all__ = [
"DogeConfig",
"DogeForCausalLM",
"DogeModel",
"DogePreTrainedModel",
"DogeForSequenceClassification",
]
| DogeForSequenceClassification |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_uses_keep_param.py | {
"start": 221,
"end": 341
} | class ____(TypedDict, total=False):
type: Required[Literal["tool_uses"]]
value: Required[int]
| BetaToolUsesKeepParam |
python | getsentry__sentry-python | sentry_sdk/client.py | {
"start": 7806,
"end": 8026
} | class ____(BaseClient):
"""
.. versionadded:: 2.0.0
A client that does not send any events to Sentry. This is used as a fallback when the Sentry SDK is not yet initialized.
"""
pass
| NonRecordingClient |
python | astropy__astropy | astropy/time/formats.py | {
"start": 25732,
"end": 30375
} | class ____(TimeNumeric):
"""
Base class for times that represent the interval from a particular
epoch as a numerical multiple of a unit time interval (e.g. seconds
or days).
"""
@classproperty(lazy=True)
def _epoch(cls):
# Ideally we would use `def epoch(cls)` here and not have the instance
# property below. However, this breaks the sphinx API docs generation
# in a way that was not resolved. See #10406 for details.
return Time(
cls.epoch_val,
cls.epoch_val2,
scale=cls.epoch_scale,
format=cls.epoch_format,
)
@property
def epoch(self):
"""Reference epoch time from which the time interval is measured."""
return self._epoch
def set_jds(self, val1, val2):
"""
Initialize the internal jd1 and jd2 attributes given val1 and val2.
For an TimeFromEpoch subclass like TimeUnix these will be floats giving
the effective seconds since an epoch time (e.g. 1970-01-01 00:00:00).
"""
# Form new JDs based on epoch time + time from epoch (converted to JD).
# One subtlety that might not be obvious is that 1.000 Julian days in
# UTC can be 86400 or 86401 seconds. For the TimeUnix format the
# assumption is that every day is exactly 86400 seconds, so this is, in
# principle, doing the math incorrectly, *except* that it matches the
# definition of Unix time which does not include leap seconds.
# note: use divisor=1./self.unit, since this is either 1 or 1/86400,
# and 1/86400 is not exactly representable as a float64, so multiplying
# by that will cause rounding errors. (But inverting it as a float64
# recovers the exact number)
day, frac = day_frac(val1, val2, divisor=1.0 / self.unit)
jd1 = self.epoch.jd1 + day
jd2 = self.epoch.jd2 + frac
# For the usual case that scale is the same as epoch_scale, we only need
# to ensure that abs(jd2) <= 0.5. Since abs(self.epoch.jd2) <= 0.5 and
# abs(frac) <= 0.5, we can do simple (fast) checks and arithmetic here
# without another call to day_frac(). Note also that `round(jd2.item())`
# is about 10x faster than `np.round(jd2)`` for a scalar.
if self.epoch.scale == self.scale:
jd1_extra = np.round(jd2) if jd2.shape else round(jd2.item())
jd1 += jd1_extra
jd2 -= jd1_extra
self.jd1, self.jd2 = jd1, jd2
return
# Create a temporary Time object corresponding to the new (jd1, jd2) in
# the epoch scale (e.g. UTC for TimeUnix) then convert that to the
# desired time scale for this object.
#
# A known limitation is that the transform from self.epoch_scale to
# self.scale cannot involve any metadata like lat or lon.
try:
tm = getattr(
Time(jd1, jd2, scale=self.epoch_scale, format="jd"), self.scale
)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale '{self.epoch_scale}' "
f"to specified scale '{self.scale}', got error:\n{err}"
) from err
self.jd1, self.jd2 = day_frac(tm._time.jd1, tm._time.jd2)
def to_value(self, parent=None, **kwargs):
# Make sure that scale is the same as epoch scale so we can just
# subtract the epoch and convert
if self.scale != self.epoch_scale:
if parent is None:
raise ValueError("cannot compute value without parent Time object")
try:
tm = getattr(parent, self.epoch_scale)
except Exception as err:
raise ScaleValueError(
f"Cannot convert from '{self.name}' epoch scale "
f"'{self.epoch_scale}' to specified scale '{self.scale}', "
f"got error:\n{err}"
) from err
jd1, jd2 = tm._time.jd1, tm._time.jd2
else:
jd1, jd2 = self.jd1, self.jd2
# This factor is guaranteed to be exactly representable, which
# means time_from_epoch1 is calculated exactly.
factor = 1.0 / self.unit
time_from_epoch1 = (jd1 - self.epoch.jd1) * factor
time_from_epoch2 = (jd2 - self.epoch.jd2) * factor
return super().to_value(jd1=time_from_epoch1, jd2=time_from_epoch2, **kwargs)
value = property(to_value)
@property
def _default_scale(self):
return self.epoch_scale
| TimeFromEpoch |
python | plotly__plotly.py | plotly/graph_objs/surface/colorbar/_title.py | {
"start": 233,
"end": 3971
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "surface.colorbar"
_path_str = "surface.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.surface.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.surface.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.surface.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | FactoryBoy__factory_boy | tests/test_transformer.py | {
"start": 2300,
"end": 2449
} | class ____:
def __init__(self, one=None, two=None, three=None):
self.one = one
self.two = two
self.three = three
| TestObject |
python | dagster-io__dagster | python_modules/dagster/dagster/_vendored/dateutil/rrule.py | {
"start": 54439,
"end": 66759
} | class ____(object):
""" Parses a string representation of a recurrence rule or set of
recurrence rules.
:param s:
Required, a string defining one or more recurrence rules.
:param dtstart:
If given, used as the default recurrence start if not specified in the
rule string.
:param cache:
If set ``True`` caching of results will be enabled, improving
performance of multiple queries considerably.
:param unfold:
If set ``True`` indicates that a rule string is split over more
than one line and should be joined before processing.
:param forceset:
If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
be returned.
:param compatible:
If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a naive
:class:`datetime.datetime` object is returned.
:param tzids:
If given, a callable or mapping used to retrieve a
:class:`datetime.tzinfo` from a string representation.
Defaults to :func:`dateutil.tz.gettz`.
:param tzinfos:
Additional time zone names / aliases which may be present in a string
representation. See :func:`dateutil.parser.parse` for more
information.
:return:
Returns a :class:`dateutil.rrule.rruleset` or
:class:`dateutil.rrule.rrule`
"""
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
# CHANGED IN VENDORED VERSION
from . import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
"""
Two ways to specify this: +1MO or MO(+1)
"""
l = []
for wday in value.split(','):
if '(' in wday:
# If it's of the form TH(+1), etc.
splt = wday.split('(')
w = splt[0]
n = int(splt[1][:-1])
elif len(wday):
# If it's of the form +1MO
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
else:
raise ValueError("Invalid (empty) BYDAY specification.")
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_date_value(self, date_value, parms, rule_tzids,
ignoretz, tzids, tzinfos):
global parser
if not parser:
# CHANGED IN VENDORED VERSION
from . import parser
datevals = []
value_found = False
TZID = None
for parm in parms:
if parm.startswith("TZID="):
try:
tzkey = rule_tzids[parm.split('TZID=')[-1]]
except KeyError:
continue
if tzids is None:
# CHANGED IN VENDORED VERSION
from . import tz
tzlookup = tz.gettz
elif callable(tzids):
tzlookup = tzids
else:
tzlookup = getattr(tzids, 'get', None)
if tzlookup is None:
msg = ('tzids must be a callable, mapping, or None, '
'not %s' % tzids)
raise ValueError(msg)
TZID = tzlookup(tzkey)
continue
# RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found
# only once.
if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
raise ValueError("unsupported parm: " + parm)
else:
if value_found:
msg = ("Duplicate value parameter found in: " + parm)
raise ValueError(msg)
value_found = True
for datestr in date_value.split(','):
date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
if TZID is not None:
if date.tzinfo is None:
date = date.replace(tzinfo=TZID)
else:
raise ValueError('DTSTART/EXDATE specifies multiple timezone')
datevals.append(date)
return datevals
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzids=None,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
TZID_NAMES = dict(map(
lambda x: (x.upper(), x),
re.findall('TZID=(?P<name>[^:]+):', s)
))
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
exdatevals.extend(
self._parse_date_value(value, parms,
TZID_NAMES, ignoretz,
tzids, tzinfos)
)
elif name == "DTSTART":
dtvals = self._parse_date_value(value, parms, TZID_NAMES,
ignoretz, tzids, tzinfos)
if len(dtvals) != 1:
raise ValueError("Multiple DTSTART values specified:" +
value)
dtstart = dtvals[0]
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
# CHANGED IN VENDORED VERSION
from . import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
rset.exdate(value)
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| _rrulestr |
python | spyder-ide__spyder | spyder/widgets/elementstable.py | {
"start": 1800,
"end": 6084
} | class ____(QAbstractTableModel, SpyderFontsMixin):
def __init__(
self,
parent: QWidget,
elements: List[Element],
with_description: bool,
with_icons: bool,
with_additional_info: bool,
with_widgets: bool,
):
QAbstractTableModel.__init__(self)
self.elements = elements
self.with_description = with_description
self.with_icons = with_icons
# Number of columns
self.n_columns = 1
# Index corresponding to columns. The 'title' column is always expected
self.columns = {'title': 0}
# Extra columns
if with_additional_info:
self.n_columns += 1
self.columns['additional_info'] = 1
if with_widgets:
self.n_columns += 1
if self.n_columns == 3:
self.columns['widgets'] = 2
else:
self.columns['widgets'] = 1
# ---- Qt overrides
# -------------------------------------------------------------------------
def data(self, index, role=Qt.DisplayRole):
element = self.elements[index.row()]
if role == Qt.DisplayRole:
if index.column() == self.columns['title']:
return self.get_title_repr(element)
elif index.column() == self.columns.get('additional_info'):
return self.get_info_repr(element)
else:
return None
elif role == Qt.DecorationRole and self.with_icons:
if index.column() == self.columns['title']:
return element['icon']
else:
return None
return None
def rowCount(self, index=QModelIndex()):
return len(self.elements)
def columnCount(self, index=QModelIndex()):
return self.n_columns
# ---- Own methods
# -------------------------------------------------------------------------
def get_title_repr(self, element: Element) -> str:
text_color = SpyderPalette.COLOR_TEXT_1
if self.with_description:
if element.get("description_color"):
description_color = element["title_color"]
else:
description_color = text_color
description = (
f'<tr><td><span style="color:{description_color}">'
f'{element["description"]}'
f'</span></td></tr>'
)
else:
description = ""
title_font_size = self.get_font(
SpyderFontType.Interface, font_size_delta=1
).pointSize()
if element.get("title_color"):
title_color = element["title_color"]
else:
title_color = text_color
title_style = (
f"color:{title_color}; font-size:{title_font_size}pt"
if self.with_description
else f"color:{title_color}"
)
return (
f'<table cellspacing="0" cellpadding="3">'
# Title
f'<tr><td><span style="{title_style}">'
f'{element["title"]}'
f'</span></td></tr>'
# Description
f'{description}'
f'</table>'
)
def get_info_repr(self, element: Element) -> str:
if element.get('additional_info'):
additional_info = f' {element["additional_info"]}'
else:
return ''
if element.get("additional_info_color"):
additional_info_style = f'color:{element["additional_info_color"]}'
else:
additional_info_style = f'color:{SpyderPalette.COLOR_TEXT_1}'
return (
f'<span style="{additional_info_style}">'
f'{additional_info}'
f'</span>'
)
def clear_elements(self):
self.beginRemoveRows(QModelIndex(), 0, self.rowCount())
self.elements = []
self.endRemoveRows()
def replace_elements(self, elements: List[Element]):
# The -1 is necessary to add exactly the number present in `elements`
# (including the 0 index). Otherwise, spurious rows are added by Qt.
self.beginInsertRows(QModelIndex(), 0, len(elements) - 1)
self.elements = elements
self.endInsertRows()
| ElementsModel |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/managed/types.py | {
"start": 346,
"end": 1242
} | class ____:
"""Represents a user-defined Fivetran destination."""
def __init__(
self,
name: str,
destination_type: str,
region: str,
destination_configuration: dict[str, Any],
time_zone_offset: Optional[int] = None,
):
self.name = check.str_param(name, "name")
self.region = check.str_param(region, "region")
self.time_zone_offset = check.opt_int_param(time_zone_offset, "time_zone_offset") or 0
self.destination_type = check.str_param(destination_type, "destination_type")
self.destination_configuration = check.dict_param(
destination_configuration, "destination_configuration", key_type=str
)
def must_be_recreated(self, other: "FivetranDestination") -> bool:
return self.name != other.name or self.destination_type != other.destination_type
| FivetranDestination |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 91239,
"end": 91919
} | class ____(nn.Module):
def __init__(self, config: DFineConfig, kernel_size: int, stride: int):
super().__init__()
self.conv1 = DFineConvNormLayer(config, config.encoder_hidden_dim, config.encoder_hidden_dim, 1, 1)
self.conv2 = DFineConvNormLayer(
config,
config.encoder_hidden_dim,
config.encoder_hidden_dim,
kernel_size,
stride,
config.encoder_hidden_dim,
)
def forward(self, input_features: torch.Tensor) -> torch.Tensor:
input_features = self.conv1(input_features)
input_features = self.conv2(input_features)
return input_features
| DFineSCDown |
python | pytorch__pytorch | test/inductor/test_snode_runtime.py | {
"start": 5928,
"end": 10365
} | class ____(TestCase):
device = DEVICE
WORLD_SIZE: int = 8
RANKS = list(range(8))
def _verify_runtime_estimation(self, fn, inps):
from torch.testing._internal.distributed.fake_pg import FakeStore
store = FakeStore()
dist.init_process_group(
backend="fake", rank=0, world_size=self.WORLD_SIZE, store=store
)
try:
metrics.reset()
torch._logging.set_logs(inductor_metrics=True)
torch.compile(fn)(*inps)
found_collective = False
for snode, runtime in metrics.node_runtimes:
if not is_collective(snode.node):
continue
found_collective = True
# Inductor swallows errors from snode runtime estimations.
# We call estimate_nccl_collective_runtime in a white-box
# fashion here so potential issues can be surfaced in tests.
est = estimate_nccl_collective_runtime(snode.node)
self.assertNotZero(est)
# Also make sure estimate_nccl_collective_runtime works
# correctly in inductor.
self.assertNotZero(runtime)
# Make sure a collective kernel is found in graph
self.assertTrue(found_collective)
torch._logging.set_logs()
finally:
dist.destroy_process_group()
def test_legacy_all_reduce(self):
def fn(x):
r = c10d.all_reduce(x, "sum", "", self.RANKS, self.WORLD_SIZE)
return c10d.wait_tensor(r)
inp = T(10, 10)
self._verify_runtime_estimation(fn, (inp,))
def test_legacy_all_reduce_coalesced(self):
def fn(x):
rs = c10d.all_reduce_coalesced(x, "sum", "", self.RANKS, self.WORLD_SIZE)
return [c10d.wait_tensor(r) for r in rs]
inp = [T(10, 10), T(15, 15)]
self._verify_runtime_estimation(fn, (inp,))
def test_legacy_all_gather_into_tensor_coalesced(self):
def fn(x):
rs = c10d.all_gather_into_tensor_coalesced(
x,
"",
self.RANKS,
self.WORLD_SIZE,
)
return [c10d.wait_tensor(r) for r in rs]
inp = [T(10, 10), T(15, 15)]
self._verify_runtime_estimation(fn, (inp,))
def test_all_reduce(self):
def fn(x):
r = _c10d.all_reduce(x, "sum", "0")
return _c10d.wait_tensor(r)
inp = T(10, 10)
self._verify_runtime_estimation(fn, (inp,))
def test_all_reduce_coalesced(self):
def fn(x):
rs = _c10d.all_reduce_coalesced(x, "sum", "0")
return [_c10d.wait_tensor(r) for r in rs]
inp = [T(10, 10), T(15, 15)]
self._verify_runtime_estimation(fn, (inp,))
def test_all_gather_into_tensor(self):
def fn(x):
rs = _c10d.all_gather_into_tensor(
x,
self.WORLD_SIZE,
"0",
)
return [_c10d.wait_tensor(r) for r in rs]
inp = T(10, 10)
self._verify_runtime_estimation(fn, (inp,))
def test_all_gather_into_tensor_coalesced(self):
def fn(x):
rs = _c10d.all_gather_into_tensor_coalesced(
x,
self.WORLD_SIZE,
"0",
)
return [_c10d.wait_tensor(r) for r in rs]
inp = [T(10, 10), T(15, 15)]
self._verify_runtime_estimation(fn, (inp,))
def test_reduce_scatter_tensor(self):
def fn(x):
rs = _c10d.reduce_scatter_tensor(
x,
"sum",
self.WORLD_SIZE,
"0",
)
return [_c10d.wait_tensor(r) for r in rs]
inp = T(self.WORLD_SIZE, 10)
self._verify_runtime_estimation(fn, (inp,))
def test_reduce_scatter_tensor_coalesced(self):
def fn(x):
rs = _c10d.reduce_scatter_tensor_coalesced(
x,
"sum",
self.WORLD_SIZE,
"0",
)
return [_c10d.wait_tensor(r) for r in rs]
inp = [T(self.WORLD_SIZE, 10), T(self.WORLD_SIZE, 15)]
self._verify_runtime_estimation(fn, (inp,))
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU:
run_tests(needs="filelock")
| TestCommAnalysis |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/event_frequency_handlers.py | {
"start": 4273,
"end": 5216
} | class ____(EventFrequencyPercentHandler):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.FREQUENCY
comparison_json_schema = {
"type": "object",
"properties": {
"interval": {"type": "string", "enum": list(PERCENT_INTERVALS.keys())},
"value": {"type": "number", "minimum": 0},
"comparison_interval": {"type": "string", "enum": list(COMPARISON_INTERVALS.keys())},
"filters": {
"type": "array",
"items": {
"anyOf": [
TaggedEventConditionHandler.comparison_json_schema,
EventAttributeConditionHandler.comparison_json_schema,
],
},
},
},
"required": ["interval", "value", "comparison_interval"],
"additionalProperties": False,
}
| PercentSessionsPercentHandler |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 45737,
"end": 47141
} | class ____(nn.Module):
def __init__(self, config: Sam2VideoConfig):
super().__init__()
hidden_size = config.memory_encoder_hidden_size
output_channels = config.memory_encoder_output_channels
self.mask_downsampler = Sam2VideoMaskDownSampler(config)
self.feature_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
self.memory_fuser = Sam2VideoMemoryFuser(config)
self.position_encoding = Sam2VideoPositionEmbeddingSine(num_pos_feats=output_channels // 2, normalize=True)
self.projection = nn.Conv2d(hidden_size, output_channels, kernel_size=1)
def forward(
self,
vision_features: torch.Tensor,
masks: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
## Process masks
masks = self.mask_downsampler(masks)
## Fuse pixel_features and downsampled masks
vision_features = self.feature_projection(vision_features)
vision_features = vision_features + masks
vision_features = self.memory_fuser(vision_features)
vision_features = self.projection(vision_features)
vision_pos_enc = self.position_encoding(vision_features.shape, vision_features.device, vision_features.dtype)
return vision_features, vision_pos_enc
@dataclass
@auto_docstring(custom_intro="Base class for the vision encoder's outputs.")
| Sam2VideoMemoryEncoder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1243651,
"end": 1246074
} | class ____(sgqlc.types.Type, Node):
"""An OIDC identity provider configured to provision identities for
an enterprise. Visible to enterprise owners or enterprise owners'
personal access tokens (classic) with read:enterprise or
admin:enterprise scope.
"""
__schema__ = github_schema
__field_names__ = ("enterprise", "external_identities", "provider_type", "tenant_id")
enterprise = sgqlc.types.Field(Enterprise, graphql_name="enterprise")
"""The enterprise this identity provider belongs to."""
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
("members_only", sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None)),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
("user_name", sgqlc.types.Arg(String, graphql_name="userName", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""ExternalIdentities provisioned by this identity provider.
Arguments:
* `members_only` (`Boolean`): Filter to external identities with
valid org membership only
* `login` (`String`): Filter to external identities with the users
login
* `user_name` (`String`): Filter to external identities with the
users userName/NameID attribute
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
provider_type = sgqlc.types.Field(sgqlc.types.non_null(OIDCProviderType), graphql_name="providerType")
"""The OIDC identity provider type"""
tenant_id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="tenantId")
"""The id of the tenant this provider is attached to"""
| OIDCProvider |
python | sanic-org__sanic | sanic/router.py | {
"start": 705,
"end": 9341
} | class ____(BaseRouter):
"""The router implementation responsible for routing a `Request` object to the appropriate handler.""" # noqa: E501
DEFAULT_METHOD = "GET"
ALLOWED_METHODS = HTTP_METHODS
def _get(
self, path: str, method: str, host: Optional[str]
) -> tuple[Route, RouteHandler, dict[str, Any]]:
try:
return self.resolve(
path=path,
method=method,
extra={"host": host} if host else None,
)
except RoutingNotFound as e:
raise NotFound(f"Requested URL {e.path} not found") from None
except NoMethod as e:
raise MethodNotAllowed(
f"Method {method} not allowed for URL {path}",
method=method,
allowed_methods=tuple(e.allowed_methods)
if e.allowed_methods
else None,
) from None
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def get( # type: ignore
self, path: str, method: str, host: Optional[str]
) -> tuple[Route, RouteHandler, dict[str, Any]]:
"""Retrieve a `Route` object containing the details about how to handle a response for a given request
:param request: the incoming request object
:type request: Request
:return: details needed for handling the request and returning the
correct response
:rtype: Tuple[ Route, RouteHandler, Dict[str, Any]]
Args:
path (str): the path of the route
method (str): the HTTP method of the route
host (Optional[str]): the host of the route
Raises:
NotFound: if the route is not found
MethodNotAllowed: if the method is not allowed for the route
Returns:
Tuple[Route, RouteHandler, Dict[str, Any]]: the route, handler, and match info
""" # noqa: E501
__tracebackhide__ = True
return self._get(path, method, host)
    def add(  # type: ignore
        self,
        uri: str,
        methods: Iterable[str],
        handler: RouteHandler,
        host: Optional[Union[str, Iterable[str]]] = None,
        strict_slashes: bool = False,
        stream: bool = False,
        ignore_body: bool = False,
        version: Optional[Union[str, float, int]] = None,
        name: Optional[str] = None,
        unquote: bool = False,
        static: bool = False,
        version_prefix: str = "/v",
        overwrite: bool = False,
        error_format: Optional[str] = None,
    ) -> Union[Route, list[Route]]:
        """Add a handler to the router.

        One route is created per host; with multiple hosts, each route
        gets a host-suffixed identifier and the full list of routes is
        returned.

        Args:
            uri (str): The path of the route.
            methods (Iterable[str]): The types of HTTP methods that should be attached,
                example: ["GET", "POST", "OPTIONS"].
            handler (RouteHandler): The sync or async function to be executed.
            host (Optional[Union[str, Iterable[str]]], optional): Host (or hosts) that
                the route should be on. Defaults to None.
            strict_slashes (bool, optional): Whether to apply strict slashes.
                Defaults to False.
            stream (bool, optional): Whether to stream the response. Defaults to False.
            ignore_body (bool, optional): Whether the incoming request body should be
                read. Defaults to False.
            version (Union[str, float, int], optional): A version modifier for the uri.
                Defaults to None.
            name (Optional[str], optional): An identifying name of the route.
                Defaults to None.
            unquote (bool, optional): Whether to unquote matched path parameters.
                Defaults to False.
            static (bool, optional): Marker recorded on ``route.extra.static``.
                Defaults to False.
            version_prefix (str, optional): Prefix used when building the versioned
                path. Defaults to "/v".
            overwrite (bool, optional): Whether an existing route may be replaced.
                Defaults to False.
            error_format (Optional[str], optional): Error response format name; it is
                validated with ``check_error_format`` when provided. Defaults to None.

        Returns:
            Union[Route, list[Route]]: The route object, or a list of route objects
            when more than one host was given.
        """  # noqa: E501
        if version is not None:
            # Normalize "v1", "/1", 1, 1.0 ... into "<prefix><version>/<uri>"
            version = str(version).strip("/").lstrip("v")
            uri = "/".join([f"{version_prefix}{version}", uri.lstrip("/")])
        # Inject type labels (e.g. <id> -> <id:int>) from handler annotations
        uri = self._normalize(uri, handler)
        params = dict(
            path=uri,
            handler=handler,
            methods=frozenset(map(str, methods)) if methods else None,
            name=name,
            strict=strict_slashes,
            unquote=unquote,
            overwrite=overwrite,
        )
        if isinstance(host, str):
            hosts = [host]
        else:
            # host may be an iterable of hosts, or None -> single host-less route
            hosts = host or [None]  # type: ignore
        routes = []
        for host in hosts:
            # NOTE(review): `params` is reused across iterations, so once a
            # truthy host sets "requirements" it persists for any later falsy
            # entries in the same call — confirm mixed host lists are not
            # expected here.
            if host:
                params.update({"requirements": {"host": host}})
            # With multiple hosts, disambiguate the route identifier per host
            ident = name
            if len(hosts) > 1:
                ident = (
                    f"{name}_{host.replace('.', '_')}"
                    if name
                    else "__unnamed__"
                )
            route = super().add(**params)  # type: ignore
            route.extra.ident = ident
            route.extra.ignore_body = ignore_body
            route.extra.stream = stream
            route.extra.hosts = hosts
            route.extra.static = static
            route.extra.error_format = error_format
            if error_format:
                check_error_format(route.extra.error_format)
            routes.append(route)
        if len(routes) == 1:
            return routes[0]
        return routes
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def find_route_by_view_name(
self, view_name: str, name: Optional[str] = None
) -> Optional[Route]:
"""Find a route in the router based on the specified view name.
Args:
view_name (str): the name of the view to search for
name (Optional[str], optional): the name of the route. Defaults to `None`.
Returns:
Optional[Route]: the route object
""" # noqa: E501
if not view_name:
return None
route = self.name_index.get(view_name)
if not route:
full_name = self.ctx.app.generate_name(view_name)
route = self.name_index.get(full_name)
if not route:
return None
return route
@property
def routes_all(self) -> dict[tuple[str, ...], Route]:
"""Return all routes in the router.
Returns:
Dict[Tuple[str, ...], Route]: a dictionary of routes
"""
return {route.parts: route for route in self.routes}
@property
def routes_static(self) -> dict[tuple[str, ...], RouteGroup]:
"""Return all static routes in the router.
_In this context "static" routes do not refer to the `app.static()`
method. Instead, they refer to routes that do not contain
any path parameters._
Returns:
Dict[Tuple[str, ...], Route]: a dictionary of routes
"""
return self.static_routes
@property
def routes_dynamic(self) -> dict[tuple[str, ...], RouteGroup]:
"""Return all dynamic routes in the router.
_Dynamic routes are routes that contain path parameters._
Returns:
Dict[Tuple[str, ...], Route]: a dictionary of routes
"""
return self.dynamic_routes
@property
def routes_regex(self) -> dict[tuple[str, ...], RouteGroup]:
"""Return all regex routes in the router.
_Regex routes are routes that contain path parameters with regex
expressions, or otherwise need regex to resolve._
Returns:
Dict[Tuple[str, ...], Route]: a dictionary of routes
"""
return self.regex_routes
def finalize(self, *args, **kwargs) -> None:
"""Finalize the router.
Raises:
SanicException: if a route contains a parameter name that starts with "__" and is not in ALLOWED_LABELS
""" # noqa: E501
super().finalize(*args, **kwargs)
for route in self.dynamic_routes.values():
if any(
label.startswith("__") and label not in ALLOWED_LABELS
for label in route.labels
):
raise SanicException(
f"Invalid route: {route}. Parameter names cannot use '__'."
)
def _normalize(self, uri: str, handler: RouteHandler) -> str:
if "<" not in uri:
return uri
sig = signature(handler)
mapping = {
param.name: param.annotation.__name__.lower()
for param in sig.parameters.values()
if param.annotation in (str, int, float, UUID)
}
reconstruction = []
for part in uri.split("/"):
if part.startswith("<") and ":" not in part:
name = part[1:-1]
annotation = mapping.get(name)
if annotation:
part = f"<{name}:{annotation}>"
reconstruction.append(part)
return "/".join(reconstruction)
| Router |
python | scipy__scipy | scipy/optimize/_zeros_py.py | {
"start": 755,
"end": 43169
} | class ____(OptimizeResult):
"""Represents the root finding result.
Attributes
----------
root : float
Estimated root location.
iterations : int
Number of iterations needed to find the root.
function_calls : int
Number of times the function was called.
converged : bool
True if the routine converged.
flag : str
Description of the cause of termination.
method : str
Root finding method used.
"""
def __init__(self, root, iterations, function_calls, flag, method):
self.root = root
self.iterations = iterations
self.function_calls = function_calls
self.converged = flag == _ECONVERGED
if flag in flag_map:
self.flag = flag_map[flag]
else:
self.flag = flag
self.method = method
def results_c(full_output, r, method):
    """Post-process a C-level solver result.

    When ``full_output`` is falsy, ``r`` is returned untouched.
    Otherwise ``r`` is unpacked as ``(root, funcalls, iterations, flag)``
    and returned as ``(root, RootResults)``.
    """
    if not full_output:
        return r
    x, funcalls, iterations, flag = r
    results = RootResults(root=x, iterations=iterations,
                          function_calls=funcalls, flag=flag,
                          method=method)
    return x, results
def _results_select(full_output, r, method):
    """Unpack ``(root, funcalls, iterations, flag)`` and shape the return.

    Returns only the root, or ``(root, RootResults)`` when
    ``full_output`` is truthy.
    """
    x, funcalls, iterations, flag = r
    if not full_output:
        return x
    results = RootResults(root=x, iterations=iterations,
                          function_calls=funcalls, flag=flag,
                          method=method)
    return x, results
def _wrap_nan_raise(f):
    """Wrap ``f`` so that a NaN return raises ``ValueError``.

    The wrapper counts its own invocations in ``_function_calls``; on a
    NaN result it raises a ``ValueError`` carrying the offending ``x``
    and the call count as ``_x`` / ``_function_calls`` attributes so the
    caller can report where the solver stalled.
    """
    def f_raise(x, *args):
        result = f(x, *args)
        # Count the call before the NaN check so the tally includes the
        # evaluation that failed.
        f_raise._function_calls += 1
        if not np.isnan(result):
            return result
        err = ValueError(f'The function value at x={x} is NaN; '
                         'solver cannot continue.')
        err._x = x
        err._function_calls = f_raise._function_calls
        raise err

    f_raise._function_calls = 0
    return f_raise
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None, x1=None, rtol=0.0,
           full_output=False, disp=True):
    """
    Find a root of a real or complex function using the Newton-Raphson
    (or secant or Halley's) method.

    Find a root of the scalar-valued function `func` given a nearby scalar
    starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is also provided, then Halley's method is
    used.

    If `x0` is a sequence with more than one item, `newton` returns an array:
    the roots of the function from each (scalar) starting point in `x0`.
    In this case, `func` must be vectorized to return a sequence or array of
    the same shape as its first argument. If `fprime` (`fprime2`) is given,
    then its return must also have the same shape: each element is the first
    (second) derivative of `func` with respect to its only variable evaluated
    at each element of its first argument.

    `newton` is for finding roots of a scalar-valued functions of a single
    variable. For problems involving several variables, see `root`.

    Parameters
    ----------
    func : callable
        The function whose root is wanted. It must be a function of a
        single variable of the form ``f(x,a,b,c,...)``, where ``a,b,c,...``
        are extra arguments that can be passed in the `args` parameter.
    x0 : float, sequence, or ndarray
        An initial estimate of the root that should be somewhere near the
        actual root. If not scalar, then `func` must be vectorized and return
        a sequence or array of the same shape as its first argument.
    fprime : callable, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the root's value. If `func` is complex-valued,
        a larger `tol` is recommended as both the real and imaginary parts
        of `x` contribute to ``|x - x0|``.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : callable, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is not None, then Halley's method
        is used.
    x1 : float, optional
        Another estimate of the root that should be somewhere near the
        actual root. Used if `fprime` is not provided.
    rtol : float, optional
        Tolerance (relative) for termination.
    full_output : bool, optional
        If `full_output` is False (default), the root is returned.
        If True and `x0` is scalar, the return value is ``(x, r)``, where ``x``
        is the root and ``r`` is a `RootResults` object.
        If True and `x0` is non-scalar, the return value is ``(x, converged,
        zero_der)`` (see Returns section for details).
    disp : bool, optional
        If True, raise a RuntimeError if the algorithm didn't converge, with
        the error message containing the number of iterations and current
        function value. Otherwise, the convergence status is recorded in a
        `RootResults` return object.
        Ignored if `x0` is not scalar.
        *Note: this has little to do with displaying, however,
        the `disp` keyword cannot be renamed for backwards compatibility.*

    Returns
    -------
    root : float, sequence, or ndarray
        Estimated location where function is zero.
    r : `RootResults`, optional
        Present if ``full_output=True`` and `x0` is scalar.
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.
    converged : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements converged successfully.
    zero_der : ndarray of bool, optional
        Present if ``full_output=True`` and `x0` is non-scalar.
        For vector functions, indicates which elements had a zero derivative.

    See Also
    --------
    root_scalar : interface to root solvers for scalar functions
    root : interface to root solvers for multi-input, multi-output functions

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic. This means that if the function is well-behaved
    the actual error in the estimated root after the nth iteration
    is approximately the square (cube for Halley) of the error
    after the (n-1)th step. However, the stopping criterion used
    here is the step size and there is no guarantee that a root
    has been found. Consequently, the result should be verified.
    Safer algorithms are brentq, brenth, ridder, and bisect,
    but they all require that the root first be bracketed in an
    interval where the function changes sign. The brentq algorithm
    is recommended for general use in one dimensional problems
    when such an interval has been found.

    When `newton` is used with arrays, it is best suited for the following
    types of problems:

    * The initial guesses, `x0`, are all relatively the same distance from
      the roots.
    * Some or all of the extra arguments, `args`, are also arrays so that a
      class of similar problems can be solved together.
    * The size of the initial guesses, `x0`, is larger than O(100) elements.
      Otherwise, a naive loop may perform as well or better than a vector.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import optimize

    >>> def f(x):
    ...     return (x**3 - 1)  # only one real root at x = 1

    ``fprime`` is not provided, use the secant method:

    >>> root = optimize.newton(f, 1.5)
    >>> root
    1.0000000000000016
    >>> root = optimize.newton(f, 1.5, fprime2=lambda x: 6 * x)
    >>> root
    1.0000000000000016

    Only ``fprime`` is provided, use the Newton-Raphson method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
    >>> root
    1.0

    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:

    >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
    ...                        fprime2=lambda x: 6 * x)
    >>> root
    1.0

    When we want to find roots for a set of related starting values and/or
    function parameters, we can provide both of those as an array of inputs:

    >>> f = lambda x, a: x**3 - a
    >>> fder = lambda x, a: 3 * x**2
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(100)
    >>> a = np.arange(-50, 50)
    >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200)

    The above is the equivalent of solving for each value in ``(x, a)``
    separately in a for-loop, just faster:

    >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,),
    ...                             maxiter=200)
    ...             for x0, a0 in zip(x, a)]
    >>> np.allclose(vec_res, loop_res)
    True

    Plot the results found for all values of ``a``:

    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(a, analytical_result, 'o')
    >>> ax.plot(a, vec_res, '.')
    >>> ax.set_xlabel('$a$')
    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
    >>> plt.show()

    """
    # Validate tolerances/iteration budget up front so failures are cheap.
    if tol <= 0:
        raise ValueError(f"tol too small ({tol:g} <= 0)")
    maxiter = operator.index(maxiter)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    # Non-scalar x0: dispatch to the vectorized implementation.
    if np.size(x0) > 1:
        return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                             full_output)
    # Convert to float (don't use float(x0); this works also for complex x0)
    # Use np.asarray because we want x0 to be a numpy object, not a Python
    # object. e.g. np.complex(1+1j) > 0 is possible, but (1 + 1j) > 0 raises
    # a TypeError
    x0 = np.asarray(x0)[()] * 1.0
    p0 = x0
    funcalls = 0
    if fprime is not None:
        # Newton-Raphson method
        method = "newton"
        for itr in range(maxiter):
            # first evaluate fval
            fval = func(p0, *args)
            funcalls += 1
            # If fval is 0, a root has been found, then terminate
            if fval == 0:
                return _results_select(
                    full_output, (p0, funcalls, itr, _ECONVERGED), method)
            fder = fprime(p0, *args)
            funcalls += 1
            if fder == 0:
                # Cannot take a Newton step; report or warn per `disp`.
                msg = "Derivative was zero."
                if disp:
                    msg += (
                        f" Failed to converge after {itr + 1} iterations,"
                        f" value is {p0}."
                    )
                    raise RuntimeError(msg)
                warnings.warn(msg, RuntimeWarning, stacklevel=2)
                return _results_select(
                    full_output, (p0, funcalls, itr + 1, _ECONVERR), method)
            newton_step = fval / fder
            if fprime2:
                fder2 = fprime2(p0, *args)
                funcalls += 1
                method = "halley"
                # Halley's method:
                #   newton_step /= (1.0 - 0.5 * newton_step * fder2 / fder)
                # Only do it if denominator stays close enough to 1
                # Rationale: If 1-adj < 0, then Halley sends x in the
                # opposite direction to Newton. Doesn't happen if x is close
                # enough to root.
                adj = newton_step * fder2 / fder / 2
                if np.abs(adj) < 1:
                    newton_step /= 1.0 - adj
            p = p0 - newton_step
            if np.isclose(p, p0, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED), method)
            p0 = p
    else:
        # Secant method
        method = "secant"
        if x1 is not None:
            if x1 == x0:
                raise ValueError("x1 and x0 must be different")
            p1 = x1
        else:
            # No second guess given: perturb x0 slightly, away from zero.
            eps = 1e-4
            p1 = x0 * (1 + eps)
            p1 += (eps if p1 >= 0 else -eps)
        q0 = func(p0, *args)
        funcalls += 1
        q1 = func(p1, *args)
        funcalls += 1
        # Keep the point with the smaller |f| as the most recent iterate.
        if abs(q1) < abs(q0):
            p0, p1, q0, q1 = p1, p0, q1, q0
        for itr in range(maxiter):
            if q1 == q0:
                # Flat secant: cannot extrapolate; fall back to midpoint.
                if p1 != p0:
                    msg = f"Tolerance of {p1 - p0} reached."
                    if disp:
                        msg += (
                            f" Failed to converge after {itr + 1} iterations,"
                            f" value is {p1}."
                        )
                        raise RuntimeError(msg)
                    warnings.warn(msg, RuntimeWarning, stacklevel=2)
                p = (p1 + p0) / 2.0
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERR), method)
            else:
                # Secant step, arranged to divide by the larger |q| for
                # numerical stability.
                if abs(q1) > abs(q0):
                    p = (-q0 / q1 * p1 + p0) / (1 - q0 / q1)
                else:
                    p = (-q1 / q0 * p0 + p1) / (1 - q1 / q0)
            if np.isclose(p, p1, rtol=rtol, atol=tol):
                return _results_select(
                    full_output, (p, funcalls, itr + 1, _ECONVERGED), method)
            p0, q0 = p1, q1
            p1 = p
            q1 = func(p1, *args)
            funcalls += 1
    if disp:
        msg = f"Failed to converge after {itr + 1} iterations, value is {p}."
        raise RuntimeError(msg)
    return _results_select(full_output, (p, funcalls, itr + 1, _ECONVERR), method)
def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2, full_output):
    """
    A vectorized version of Newton, Halley, and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = np.array(x0, copy=True)
    # `failures` tracks elements not yet converged; `nz_der` tracks
    # elements whose derivative (or secant slope) is nonzero.
    failures = np.ones_like(p, dtype=bool)
    nz_der = np.ones_like(failures)
    if fprime is not None:
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = np.asarray(func(p, *args))
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                failures = fval.astype(bool)
                break
            fder = np.asarray(fprime(p, *args))
            nz_der = (fder != 0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            if fprime2 is not None:
                # Halley correction applied elementwise
                fder2 = np.asarray(fprime2(p, *args))
                dp = dp / (1.0 - 0.5 * dp * fder2[nz_der] / fder[nz_der])
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, dp, np.float64))
            p[nz_der] -= dp
            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
    else:
        # Secant method
        dx = np.finfo(float).eps**0.33
        # Perturb each starting point slightly, away from zero.
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = np.asarray(func(p, *args))
        q1 = np.asarray(func(p1, *args))
        active = np.ones_like(p, dtype=bool)
        for iteration in range(maxiter):
            nz_der = (q1 != q0)
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p = np.asarray(p, dtype=np.result_type(p, p1, dp, np.float64))
            p[nz_der] = p1[nz_der] - dp
            # Elements whose slope collapsed to zero get a bisection-style
            # midpoint update instead, and are retired from further secant
            # updates via `active`.
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
            p1, p = p, p1
            q0 = q1
            q1 = np.asarray(func(p1, *args))
    zero_der = ~nz_der & failures  # don't include converged with zero-ders
    if zero_der.any():
        # Secant warnings
        if fprime is None:
            nonzero_dp = (p1 != p)
            # non-zero dp, but infinite newton step
            zero_der_nz_dp = (zero_der & nonzero_dp)
            if zero_der_nz_dp.any():
                rms = np.sqrt(
                    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
                )
                warnings.warn(f'RMS of {rms:g} reached', RuntimeWarning, stacklevel=3)
        # Newton or Halley warnings
        else:
            all_or_some = 'all' if zero_der.all() else 'some'
            msg = f'{all_or_some:s} derivatives were zero'
            warnings.warn(msg, RuntimeWarning, stacklevel=3)
    elif failures.any():
        all_or_some = 'all' if failures.all() else 'some'
        msg = f'{all_or_some:s} failed to converge after {maxiter:d} iterations'
        if failures.all():
            raise RuntimeError(msg)
        warnings.warn(msg, RuntimeWarning, stacklevel=3)
    if full_output:
        result = namedtuple('result', ('root', 'converged', 'zero_der'))
        p = result(p, ~failures, zero_der)
    return p
def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find root of a function within an interval using bisection.

    Basic bisection routine to find a root of the function `f` between the
    arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
    Slow but sure.

    Parameters
    ----------
    f : function
        Python function returning a number. `f` must be continuous, and
        f(a) and f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be positive.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where x is the root, and r is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in a `RootResults`
        return object.

    Returns
    -------
    root : float
        Root of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    Notes
    -----
    As mentioned in the parameter documentation, the computed root ``x0`` will
    satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the
    exact root. In equation form, this terminating condition is ``abs(x - x0)
    <= xtol + rtol * abs(x0)``.

    The default value ``xtol=2e-12`` may lead to surprising behavior if one
    expects `bisect` to always compute roots with relative error near machine
    precision. Care should be taken to select `xtol` for the use case at hand.
    Setting ``xtol=5e-324``, the smallest subnormal number, will ensure the
    highest level of accuracy. Larger values of `xtol` may be useful for saving
    function evaluations when a root is at or near zero in applications where
    the tiny absolute differences available between floating point numbers near
    zero are not meaningful.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.bisect(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.bisect(f, -2, 0)
    >>> root
    -1.0

    See Also
    --------
    brentq, brenth, bisect, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding
    elementwise.find_root : efficient elementwise 1-D root-finder

    """
    # Normalize extra arguments and validate tolerances before the C call.
    args = args if isinstance(args, tuple) else (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError(f"xtol too small ({xtol:g} <= 0)")
    if rtol < _rtol:
        raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})")
    # Guard against NaN function values, then defer to the C implementation.
    wrapped = _wrap_nan_raise(f)
    raw = _zeros._bisect(wrapped, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw, "bisect")
def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in an interval using Ridder's method.

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be positive.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    root : float
        Root of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence.
        In particular, ``r.converged`` is True if the routine converged.

    See Also
    --------
    brentq, brenth, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder
    elementwise.find_root : efficient elementwise 1-D root-finder

    Notes
    -----
    Uses [Ridders1979]_ method to find a root of the function `f` between the
    arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
    classic description and source of the algorithm. A description can also be
    found in any recent edition of Numerical Recipes.

    The routine used here diverges slightly from standard presentations in
    order to be a bit more careful of tolerance.

    As mentioned in the parameter documentation, the computed root ``x0`` will
    satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the
    exact root. In equation form, this terminating condition is ``abs(x - x0)
    <= xtol + rtol * abs(x0)``.

    The default value ``xtol=2e-12`` may lead to surprising behavior if one
    expects `ridder` to always compute roots with relative error near machine
    precision. Care should be taken to select `xtol` for the use case at hand.
    Setting ``xtol=5e-324``, the smallest subnormal number, will ensure the
    highest level of accuracy. Larger values of `xtol` may be useful for saving
    function evaluations when a root is at or near zero in applications where
    the tiny absolute differences available between floating point numbers near
    zero are not meaningful.

    References
    ----------
    .. [Ridders1979]
       Ridders, C. F. J. "A New Algorithm for Computing a
       Single Root of a Real Continuous Function."
       IEEE Trans. Circuits Systems 26, 979-980, 1979.

    Examples
    --------

    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.ridder(f, 0, 2)
    >>> root
    1.0

    >>> root = optimize.ridder(f, -2, 0)
    >>> root
    -1.0

    """
    # Normalize extra arguments and validate tolerances before the C call.
    args = args if isinstance(args, tuple) else (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError(f"xtol too small ({xtol:g} <= 0)")
    if rtol < _rtol:
        raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})")
    # Guard against NaN function values, then defer to the C implementation.
    wrapped = _wrap_nan_raise(f)
    raw = _zeros._ridder(wrapped, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw, "ridder")
def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """
    Find a root of a function in a bracketing interval using Brent's method.

    Uses the classic Brent's method to find a root of the function `f` on
    the sign changing interval [a , b]. Generally considered the best of the
    rootfinding routines here. It is a safe version of the secant method that
    uses inverse quadratic extrapolation. Brent's method combines root
    bracketing, interval bisection, and inverse quadratic interpolation. It is
    sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
    claims convergence is guaranteed for functions computable within [a,b].

    [Brent1973]_ provides the classic description of the algorithm. Another
    description can be found in a recent edition of Numerical Recipes, including
    [PressEtal1992]_. A third description is at
    http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
    understand the algorithm just by reading our code. Our code diverges a bit
    from standard presentations: we choose a different formula for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Python function returning a number. The function :math:`f`
        must be continuous, and :math:`f(a)` and :math:`f(b)` must
        have opposite signs.
    a : scalar
        One end of the bracketing interval :math:`[a, b]`.
    b : scalar
        The other end of the bracketing interval :math:`[a, b]`.
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be positive. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
        and ``rtol/2``. [Brent1973]_
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    root : float
        Root of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    basinhopping, differential_evolution, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : N-D root-finding
    brenth, ridder, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder
    elementwise.find_root : efficient elementwise 1-D root-finder

    Notes
    -----
    `f` must be continuous.  f(a) and f(b) must have opposite signs.

    As mentioned in the parameter documentation, the computed root ``x0`` will
    satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the
    exact root. In equation form, this terminating condition is ``abs(x - x0)
    <= xtol + rtol * abs(x0)``.

    The default value ``xtol=2e-12`` may lead to surprising behavior if one
    expects `brentq` to always compute roots with relative error near machine
    precision. Care should be taken to select `xtol` for the use case at hand.
    Setting ``xtol=5e-324``, the smallest subnormal number, will ensure the
    highest level of accuracy. Larger values of `xtol` may be useful for saving
    function evaluations when a root is at or near zero in applications where
    the tiny absolute differences available between floating point numbers near
    zero are not meaningful.

    References
    ----------
    .. [Brent1973]
       Brent, R. P.,
       *Algorithms for Minimization Without Derivatives*.
       Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    .. [PressEtal1992]
       Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
       *Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
       Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
       Section 9.3: "Van Wijngaarden-Dekker-Brent Method."

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brentq(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brentq(f, 0, 2)
    >>> root
    1.0

    """
    # Normalize extra arguments and validate tolerances before the C call.
    args = args if isinstance(args, tuple) else (args,)
    maxiter = operator.index(maxiter)
    if xtol <= 0:
        raise ValueError(f"xtol too small ({xtol:g} <= 0)")
    if rtol < _rtol:
        raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})")
    # Guard against NaN function values, then defer to the C implementation.
    wrapped = _wrap_nan_raise(f)
    raw = _zeros._brentq(wrapped, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw, "brentq")
def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of a function in a bracketing interval using Brent's
    method with hyperbolic extrapolation.

    A variation on the classic Brent routine to find a root of the function f
    between the arguments a and b that uses hyperbolic extrapolation instead of
    inverse quadratic extrapolation. Bus & Dekker (1975) guarantee convergence
    for this method, claiming that the upper bound of function evaluations here
    is 4 or 5 times that of bisection.
    f(a) and f(b) cannot have the same signs. Generally, on a par with the
    brent routine, but not as heavily tested. It is a safe version of the
    secant method that uses hyperbolic extrapolation.
    The version here is by Chuck Harris, and implements Algorithm M of
    [BusAndDekker1975]_, where further details (convergence properties,
    additional remarks and such) can be found

    Parameters
    ----------
    f : function
        Python function returning a number. f must be continuous, and f(a) and
        f(b) must have opposite signs.
    a : scalar
        One end of the bracketing interval [a,b].
    b : scalar
        The other end of the bracketing interval [a,b].
    xtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter must be positive. As with `brentq`, for nice
        functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
    rtol : number, optional
        The computed root ``x0`` will satisfy ``np.isclose(x, x0,
        atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
        parameter cannot be smaller than its default value of
        ``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
        ``xtol/2`` and ``rtol/2``.
    maxiter : int, optional
        If convergence is not achieved in `maxiter` iterations, an error is
        raised. Must be >= 0.
    args : tuple, optional
        Containing extra arguments for the function `f`.
        `f` is called by ``apply(f, (x)+args)``.
    full_output : bool, optional
        If `full_output` is False, the root is returned. If `full_output` is
        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
        a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError if the algorithm didn't converge.
        Otherwise, the convergence status is recorded in any `RootResults`
        return object.

    Returns
    -------
    root : float
        Root of `f` between `a` and `b`.
    r : `RootResults` (present if ``full_output = True``)
        Object containing information about the convergence. In particular,
        ``r.converged`` is True if the routine converged.

    See Also
    --------
    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers
    leastsq : nonlinear least squares minimizer
    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
    basinhopping, differential_evolution, brute : global optimizers
    fminbound, brent, golden, bracket : local scalar minimizers
    fsolve : N-D root-finding
    brentq, ridder, bisect, newton : 1-D root-finding
    fixed_point : scalar fixed-point finder
    elementwise.find_root : efficient elementwise 1-D root-finder

    Notes
    -----
    As mentioned in the parameter documentation, the computed root ``x0`` will
    satisfy ``np.isclose(x, x0, atol=xtol, rtol=rtol)``, where ``x`` is the
    exact root. In equation form, this terminating condition is ``abs(x - x0)
    <= xtol + rtol * abs(x0)``.

    The default value ``xtol=2e-12`` may lead to surprising behavior if one
    expects `brenth` to always compute roots with relative error near machine
    precision. Care should be taken to select `xtol` for the use case at hand.
    Setting ``xtol=5e-324``, the smallest subnormal number, will ensure the
    highest level of accuracy. Larger values of `xtol` may be useful for saving
    function evaluations when a root is at or near zero in applications where
    the tiny absolute differences available between floating point numbers near
    zero are not meaningful.

    References
    ----------
    .. [BusAndDekker1975]
       Bus, J. C. P., Dekker, T. J.,
       "Two Efficient Algorithms with Guaranteed Convergence for Finding a Zero
       of a Function", ACM Transactions on Mathematical Software, Vol. 1, Issue
       4, Dec. 1975, pp. 330-345. Section 3: "Algorithm M".
       :doi:`10.1145/355656.355659`

    Examples
    --------
    >>> def f(x):
    ...     return (x**2 - 1)

    >>> from scipy import optimize

    >>> root = optimize.brenth(f, -2, 0)
    >>> root
    -1.0

    >>> root = optimize.brenth(f, 0, 2)
    >>> root
    1.0
    """
    # Normalize extra arguments to a tuple so they can be splatted into f.
    if not isinstance(args, tuple):
        args = (args,)
    # operator.index rejects non-integral maxiter values with a TypeError.
    maxiter = operator.index(maxiter)
    # Validate tolerances before entering the C routine; the messages mirror
    # those in brentq for consistency.
    if xtol <= 0:
        raise ValueError(f"xtol too small ({xtol:g} <= 0)")
    if rtol < _rtol:
        raise ValueError(f"rtol too small ({rtol:g} < {_rtol:g})")
    # NOTE(review): _wrap_nan_raise presumably raises when f returns NaN --
    # its body is not visible in this chunk; confirm before relying on it.
    f = _wrap_nan_raise(f)
    # The actual root search runs in the C extension module.
    r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
    return results_c(full_output, r, "brenth")
################################
# TOMS "Algorithm 748: Enclosing Zeros of Continuous Functions", by
# Alefeld, G. E. and Potra, F. A. and Shi, Yixun,
# See [1]
def _notclose(fs, rtol=_rtol, atol=_xtol):
    """Return True iff every value is truthy, finite, and no two are close."""
    # Any falsy (zero/None) or non-finite value disqualifies the set.
    if not all(fs) or not all(np.isfinite(fs)):
        return False
    # Compare each value against all later ones; a near-duplicate disqualifies.
    for i, f in enumerate(fs[:-1]):
        if any(np.isclose(f, fs[i + 1:], rtol=rtol, atol=atol)):
            return False
    return True
def _secant(xvals, fvals):
"""Perform a secant step, taking a little care"""
# Secant has many "mathematically" equivalent formulations
# x2 = x0 - (x1 - x0)/(f1 - f0) * f0
# = x1 - (x1 - x0)/(f1 - f0) * f1
# = (-x1 * f0 + x0 * f1) / (f1 - f0)
# = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
# = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
x0, x1 = xvals[:2]
f0, f1 = fvals[:2]
if f0 == f1:
return np.nan
if np.abs(f1) > np.abs(f0):
x2 = (-f0 / f1 * x1 + x0) / (1 - f0 / f1)
else:
x2 = (-f1 / f0 * x0 + x1) / (1 - f1 / f0)
return x2
def _update_bracket(ab, fab, c, fc):
"""Update a bracket given (c, fc), return the discarded endpoints."""
fa, fb = fab
idx = (0 if np.sign(fa) * np.sign(fc) > 0 else 1)
rx, rfx = ab[idx], fab[idx]
fab[idx] = fc
ab[idx] = c
return rx, rfx
def _compute_divided_differences(xvals, fvals, N=None, full=True,
forward=True):
"""Return a matrix of divided differences for the xvals, fvals pairs
DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i
If full is False, just return the main diagonal(or last row):
f[a], f[a, b] and f[a, b, c].
If forward is False, return f[c], f[b, c], f[a, b, c]."""
if full:
if forward:
xvals = np.asarray(xvals)
else:
xvals = np.array(xvals)[::-1]
M = len(xvals)
N = M if N is None else min(N, M)
DD = np.zeros([M, N])
DD[:, 0] = fvals[:]
for i in range(1, N):
DD[i:, i] = (np.diff(DD[i - 1:, i - 1]) /
(xvals[i:] - xvals[:M - i]))
return DD
xvals = np.asarray(xvals)
dd = np.array(fvals)
row = np.array(fvals)
idx2Use = (0 if forward else -1)
dd[0] = fvals[idx2Use]
for i in range(1, len(xvals)):
denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
row = np.diff(row)[:] / denom
dd[i] = row[idx2Use]
return dd
def _interpolated_poly(xvals, fvals, x):
"""Compute p(x) for the polynomial passing through the specified locations.
Use Neville's algorithm to compute p(x) where p is the minimal degree
polynomial passing through the points xvals, fvals"""
xvals = np.asarray(xvals)
N = len(xvals)
Q = np.zeros([N, N])
D = np.zeros([N, N])
Q[:, 0] = fvals[:]
D[:, 0] = fvals[:]
for k in range(1, N):
alpha = D[k:, k - 1] - Q[k - 1:N - 1, k - 1]
diffik = xvals[0:N - k] - xvals[k:N]
Q[k:, k] = (xvals[k:] - x) / diffik * alpha
D[k:, k] = (xvals[:N - k] - x) / diffik * alpha
# Expect Q[-1, 1:] to be small relative to Q[-1, 0] as x approaches a root
return np.sum(Q[-1, 1:]) + Q[-1, 0]
def _inverse_poly_zero(a, b, c, d, fa, fb, fc, fd):
    """Inverse cubic interpolation: f-values -> x-values.

    Given four points (fa, a), (fb, b), (fc, c), (fd, d) with fa, fb, fc, fd
    all distinct, fit the polynomial IP(y) through them and return x = IP(0).
    """
    ys = [fa, fb, fc, fd]
    xs = [a, b, c, d]
    # Interpolate x as a polynomial in y, then evaluate at y = 0.
    return _interpolated_poly(ys, xs, 0)
def _newton_quadratic(ab, fab, d, fd, k):
    """Apply Newton-Raphson-like steps, approximating f' by divided differences.

    ab is a real interval [a, b] containing a root,
    fab holds the real values of f(a), f(b),
    d is a real number outside [a, b],
    k is the number of steps to apply.
    """
    a, b = ab
    fa, fb = fab
    _, B, A = _compute_divided_differences([a, b, d], [fa, fb, fd],
                                           forward=True, full=False)

    def _P(x):
        # Horner evaluation of fa + B*(x - a) + A*(x - a)*(x - b).
        return (A * (x - b) + B) * (x - a) + fa

    if A == 0:
        # Degenerate (linear) case: one exact Newton step on the line.
        r = a - fa / B
    else:
        # Start from the endpoint on the convex side of the quadratic.
        r = a if np.sign(A) * np.sign(fa) > 0 else b
    for _ in range(k):
        candidate = r - _P(r) / (B + A * (2 * r - a - b))
        if ab[0] < candidate < ab[1]:
            r = candidate
            continue
        # Candidate escaped the bracket: keep r if it is inside, else bisect.
        if ab[0] < r < ab[1]:
            return r
        r = sum(ab) / 2.0
        break
    return r
| RootResults |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/extra/codemods.py | {
"start": 9312,
"end": 11238
} | class ____(VisitorBasedCodemodCommand):
"""Fix deprecated white/blacklist arguments to characters::
st.characters(whitelist_categories=...) -> st.characters(categories=...)
st.characters(blacklist_categories=...) -> st.characters(exclude_categories=...)
st.characters(whitelist_characters=...) -> st.characters(include_characters=...)
st.characters(blacklist_characters=...) -> st.characters(exclude_characters=...)
Additionally, we drop `exclude_categories=` if `categories=` is present,
because this argument is always redundant (or an error).
"""
DESCRIPTION = "Fix deprecated white/blacklist arguments to characters."
METADATA_DEPENDENCIES = (cst.metadata.QualifiedNameProvider,)
_replacements: ClassVar = {
"whitelist_categories": "categories",
"blacklist_categories": "exclude_categories",
"whitelist_characters": "include_characters",
"blacklist_characters": "exclude_characters",
}
@m.leave(
m.Call(
metadata=match_qualname("hypothesis.strategies.characters"),
args=[
m.ZeroOrMore(),
m.Arg(keyword=m.OneOf(*map(m.Name, _replacements))),
m.ZeroOrMore(),
],
),
)
def fn(self, original_node, updated_node):
# Update to the new names
newargs = []
for arg in updated_node.args:
kw = self._replacements.get(arg.keyword.value, arg.keyword.value)
newargs.append(arg.with_changes(keyword=cst.Name(kw)))
# Drop redundant exclude_categories, which is now an error
if any(m.matches(arg, m.Arg(keyword=m.Name("categories"))) for arg in newargs):
ex = m.Arg(keyword=m.Name("exclude_categories"))
newargs = [a for a in newargs if m.matches(a, ~ex)]
return updated_node.with_changes(args=newargs)
| HypothesisFixCharactersArguments |
python | celery__celery | t/unit/apps/test_multi.py | {
"start": 6148,
"end": 10942
} | class ____:
def setup_method(self):
self.p = Mock(name='p')
self.p.options = {
'--executable': 'python',
'--logfile': '/var/log/celery/foo.log',
}
self.p.namespaces = {}
with patch('celery.apps.multi.os.mkdir'):
self.node = Node('foo@bar.com', options={'-A': 'proj'})
self.expander = self.node.expander = Mock(name='expander')
self.node.pid = 303
def test_from_kwargs(self):
with patch('celery.apps.multi.os.mkdir'):
n = Node.from_kwargs(
'foo@bar.com',
max_tasks_per_child=30, A='foo', Q='q1,q2', O='fair',
)
assert sorted(n.argv) == sorted([
'-m celery -A foo worker --detach',
f'--executable={n.executable}',
'-O fair',
'-n foo@bar.com',
'--logfile={}'.format(os.path.normpath('/var/log/celery/foo%I.log')),
'-Q q1,q2',
'--max-tasks-per-child=30',
'--pidfile={}'.format(os.path.normpath('/var/run/celery/foo.pid')),
'',
])
@patch('os.kill')
def test_send(self, kill):
assert self.node.send(9)
kill.assert_called_with(self.node.pid, 9)
@patch('os.kill')
def test_send__ESRCH(self, kill):
kill.side_effect = OSError()
kill.side_effect.errno = errno.ESRCH
assert not self.node.send(9)
kill.assert_called_with(self.node.pid, 9)
@patch('os.kill')
def test_send__error(self, kill):
kill.side_effect = OSError()
kill.side_effect.errno = errno.ENOENT
with pytest.raises(OSError):
self.node.send(9)
kill.assert_called_with(self.node.pid, 9)
def test_alive(self):
self.node.send = Mock(name='send')
assert self.node.alive() is self.node.send.return_value
self.node.send.assert_called_with(0)
def test_start(self):
self.node._waitexec = Mock(name='_waitexec')
self.node.start(env={'foo': 'bar'}, kw=2)
self.node._waitexec.assert_called_with(
self.node.argv, path=self.node.executable,
env={'foo': 'bar'}, kw=2,
)
@patch('celery.apps.multi.Popen')
def test_waitexec(self, Popen, argv=['A', 'B']):
on_spawn = Mock(name='on_spawn')
on_signalled = Mock(name='on_signalled')
on_failure = Mock(name='on_failure')
env = Mock(name='env')
self.node.handle_process_exit = Mock(name='handle_process_exit')
self.node._waitexec(
argv,
path='python',
env=env,
on_spawn=on_spawn,
on_signalled=on_signalled,
on_failure=on_failure,
)
Popen.assert_called_with(
self.node.prepare_argv(argv, 'python'), env=env)
self.node.handle_process_exit.assert_called_with(
Popen().wait(),
on_signalled=on_signalled,
on_failure=on_failure,
)
def test_handle_process_exit(self):
assert self.node.handle_process_exit(0) == 0
def test_handle_process_exit__failure(self):
on_failure = Mock(name='on_failure')
assert self.node.handle_process_exit(9, on_failure=on_failure) == 9
on_failure.assert_called_with(self.node, 9)
def test_handle_process_exit__signalled(self):
on_signalled = Mock(name='on_signalled')
assert self.node.handle_process_exit(
-9, on_signalled=on_signalled) == 9
on_signalled.assert_called_with(self.node, 9)
def test_logfile(self):
assert self.node.logfile == self.expander.return_value
self.expander.assert_called_with(os.path.normpath('/var/log/celery/%n%I.log'))
@patch('celery.apps.multi.os.path.exists')
def test_pidfile_default(self, mock_exists):
n = Node.from_kwargs(
'foo@bar.com',
)
assert n.options['--pidfile'] == os.path.normpath('/var/run/celery/%n.pid')
mock_exists.assert_any_call(os.path.normpath('/var/run/celery'))
@patch('celery.apps.multi.os.makedirs')
@patch('celery.apps.multi.os.path.exists', return_value=False)
def test_pidfile_custom(self, mock_exists, mock_dirs):
n = Node.from_kwargs(
'foo@bar.com',
pidfile='/var/run/demo/celery/%n.pid'
)
assert n.options['--pidfile'] == '/var/run/demo/celery/%n.pid'
try:
mock_exists.assert_any_call('/var/run/celery')
except AssertionError:
pass
else:
raise AssertionError("Expected exists('/var/run/celery') to not have been called.")
mock_exists.assert_any_call('/var/run/demo/celery')
mock_dirs.assert_any_call('/var/run/demo/celery')
| test_Node |
python | pytorch__pytorch | torch/distributed/elastic/agent/server/api.py | {
"start": 7684,
"end": 9800
} | class ____(str, Enum):
"""A state of the ``WorkerGroup``.
Workers in a worker group change state as a unit. If a single worker
in a worker group fails the entire set is considered failed::
UNKNOWN - agent lost track of worker group state, unrecoverable
INIT - worker group object created not yet started
HEALTHY - workers running and healthy
UNHEALTHY - workers running and unhealthy
STOPPED - workers stopped (interrupted) by the agent
SUCCEEDED - workers finished running (exit 0)
FAILED - workers failed to successfully finish (exit !0)
A worker group starts from an initial ``INIT`` state,
then progresses to ``HEALTHY`` or ``UNHEALTHY`` states,
and finally reaches a terminal ``SUCCEEDED`` or ``FAILED`` state.
Worker groups can be interrupted and temporarily put into ``STOPPED`` state
by the agent. Workers in ``STOPPED`` state are scheduled to be restarted
in the near future by the agent. Some examples of workers being put into
``STOPPED`` state are:
1. Worker group failure|unhealthy observed
2. Membership change detected
When actions (start, stop, rdzv, retry, etc) on worker group fails
and results in the action being partially applied to the worker group
the state will be ``UNKNOWN``. Typically this happens on uncaught/unhandled
exceptions during state change events on the agent. The agent is not
expected to recover worker groups in ``UNKNOWN`` state and is better off
self terminating and allowing the job manager to retry the node.
"""
UNKNOWN = "UNKNOWN"
INIT = "INIT"
HEALTHY = "HEALTHY"
UNHEALTHY = "UNHEALTHY"
STOPPED = "STOPPED"
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
@staticmethod
def is_running(state: "WorkerState") -> bool:
"""Return the state of the Worker.
Returns:
True if the worker state represents workers still running
(e.g. that the process exists but not necessarily healthy).
"""
return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY}
| WorkerState |
python | huggingface__transformers | tests/models/bark/test_processing_bark.py | {
"start": 814,
"end": 5338
} | class ____(unittest.TestCase):
def setUp(self):
self.checkpoint = "suno/bark-small"
self.tmpdirname = tempfile.mkdtemp()
self.voice_preset = "en_speaker_1"
self.input_string = "This is a test string"
self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
self.speaker_embeddings_directory = "speaker_embeddings"
def get_tokenizer(self, **kwargs):
return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
def test_save_load_pretrained_additional_features(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
)
# TODO (ebezzam) not all speaker embedding are properly downloaded.
# My hypothesis: there are many files (~700 speaker embeddings) and some fail to download (not the same at different first runs)
# https://github.com/huggingface/transformers/blob/967045082faaaaf3d653bfe665080fd746b2bb60/src/transformers/models/bark/processing_bark.py#L89
# https://github.com/huggingface/transformers/blob/967045082faaaaf3d653bfe665080fd746b2bb60/src/transformers/models/bark/processing_bark.py#L188
# So for testing purposes, we will remove the unavailable speaker embeddings before saving.
processor._verify_speaker_embeddings(remove_unavailable=True)
processor.save_pretrained(
self.tmpdirname,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
speaker_embeddings_directory=self.speaker_embeddings_directory,
)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
processor = BarkProcessor.from_pretrained(
self.tmpdirname,
self.speaker_embeddings_dict_path,
bos_token="(BOS)",
eos_token="(EOS)",
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
def test_speaker_embeddings(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint,
speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
)
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
"semantic_prompt": np.ones(seq_len),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string, voice_preset=voice_preset)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from npz file
tmpfilename = os.path.join(self.tmpdirname, "file.npz")
np.savez(tmpfilename, **voice_preset)
inputs = processor(text=self.input_string, voice_preset=tmpfilename)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from the hub
inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
def test_tokenizer(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
encoded_processor = processor(text=self.input_string)
encoded_tok = tokenizer(
self.input_string,
padding="max_length",
max_length=256,
add_special_tokens=False,
return_attention_mask=True,
return_token_type_ids=False,
)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| BarkProcessorTest |
python | readthedocs__readthedocs.org | readthedocs/search/tests/test_views.py | {
"start": 4759,
"end": 14042
} | class ____:
@pytest.fixture(autouse=True)
def setup(self):
self.url = reverse("search")
def _get_search_result(self, url, client, search_params):
resp = client.get(url, search_params)
assert resp.status_code == 200
results = resp.context["results"]
facets = resp.context["facets"]
return results, facets
def _get_highlight(self, result, field, type=None):
# if query is from page title,
# highlighted title is present in 'result.meta.highlight.title'
if not type and field == "title":
highlight = result["highlights"]["title"]
# if result is not from page title,
# then results and highlighted results are present inside 'blocks'
else:
blocks = result["blocks"]
assert len(blocks) >= 1
# checking first inner_hit
inner_hit_0 = blocks[0]
assert inner_hit_0["type"] == type
highlight = inner_hit_0["highlights"][field]
return highlight
def _get_highlighted_words(self, string):
highlighted_words = re.findall("<span>(.*?)</span>", string)
return highlighted_words
@pytest.mark.parametrize("data_type", DATA_TYPES_VALUES)
@pytest.mark.parametrize("page_num", [0, 1])
def test_file_search(self, client, project, data_type, page_num):
data_type = data_type.split(".")
type, field = None, None
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query = get_search_query_from_project_file(
project_slug=project.slug,
page_num=page_num,
type=type,
field=field,
)
results, _ = self._get_search_result(
url=self.url, client=client, search_params={"q": query, "type": "file"}
)
assert len(results) >= 1
# checking first result
result_0 = results[0]
highlight = self._get_highlight(result_0, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
# Make it lower because our search is case insensitive
assert word.lower() in query.lower()
@pytest.mark.parametrize("data_type", DATA_TYPES_VALUES)
@pytest.mark.parametrize("case", ["upper", "lower", "title"])
def test_file_search_case_insensitive(self, client, project, case, data_type):
"""
Check File search is case insensitive.
It tests with uppercase, lowercase and camelcase.
"""
type, field = None, None
data_type = data_type.split(".")
if len(data_type) < 2:
field = data_type[0]
else:
type, field = data_type
query_text = get_search_query_from_project_file(
project_slug=project.slug,
type=type,
field=field,
)
cased_query = getattr(query_text, case)
query = cased_query()
results, _ = self._get_search_result(
url=self.url, client=client, search_params={"q": query, "type": "file"}
)
assert len(results) >= 1
first_result = results[0]
highlight = self._get_highlight(first_result, field, type)
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_exact_match(self, client, project):
"""
Check quoted query match exact phrase.
Making a query with quoted text like ``"foo bar"`` should match exactly
``foo bar`` phrase.
"""
# `Sphinx` word is present both in `kuma` and `docs` files
# But the phrase `Sphinx uses` is present only in `kuma` docs.
# So search with this phrase to check
query = r'"Sphinx uses"'
results, _ = self._get_search_result(
url=self.url, client=client, search_params={"q": query, "type": "file"}
)
# There are two results,
# one from each version of the "kuma" project.
assert len(results) == 2
# Both versions have the same exact content.
# Order of results is not deterministic anymore for some reason,
# so we use a set to compare the results.
assert {result["version"]["slug"] for result in results} == {"stable", "latest"}
for result in results:
assert result["project"] == {"alias": None, "slug": "kuma"}
assert result["domain"] == "http://kuma.readthedocs.io"
assert result["path"].endswith("/documentation.html")
blocks = results[0]["blocks"]
assert len(blocks) == 1
assert blocks[0]["type"] == "section"
highlight = self._get_highlight(results[0], "content", "section")
assert len(highlight) == 1
highlighted_words = self._get_highlighted_words(highlight[0])
assert len(highlighted_words) >= 1
for word in highlighted_words:
assert word.lower() in query.lower()
def test_file_search_filter_by_project(self, client):
"""Test that search result are filtered according to project."""
# `environment` word is present both in `kuma` and `docs` files
# so search with this phrase but filter through `kuma` project
search_params = {
"q": "project:kuma environment",
"type": "file",
}
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
project_facets = facets["project"]
resulted_project_facets = [facet[0] for facet in project_facets]
# There should be 1 search result as we have filtered
assert len(results) == 1
# kuma should should be there only
assert {"alias": None, "slug": "kuma"} == results[0]["project"]
# The projects we search is the only one included in the final results.
assert resulted_project_facets == ["kuma"]
@pytest.mark.xfail(
reason="Versions are not showing correctly! Fixme while rewrite!"
)
def test_file_search_show_versions(self, client, all_projects, es_index, settings):
project = all_projects[0]
# Create some versions of the project
versions = [get(Version, project=project) for _ in range(3)]
query = get_search_query_from_project_file(project_slug=project.slug)
results, facets = self._get_search_result(
url=self.url,
client=client,
search_params={"q": query, "type": "file"},
)
# Results can be from other projects also
assert len(results) >= 1
version_facets = facets["version"]
version_facets_str = [facet[0] for facet in version_facets]
# There should be total 4 versions
# one is latest, and other 3 that we created above
assert len(version_facets) == 4
project_versions = [v.slug for v in versions] + [LATEST]
assert sorted(project_versions) == sorted(version_facets_str)
def test_file_search_subprojects(self, client, all_projects, es_index):
project = all_projects[0]
subproject = all_projects[1]
# Add another project as subproject of the project
project.add_subproject(subproject, alias="subproject")
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {
"q": f"subprojects:{project.slug} {query}",
"type": "file",
}
results, _ = self._get_search_result(
url=self.url,
client=client,
search_params=search_params,
)
assert len(results) == 1
assert results[0]["project"] == {"alias": "subproject", "slug": subproject.slug}
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_only_projects_owned_by_the_user(self, client, all_projects):
project = Project.objects.get(slug="docs")
user = get(User)
user.projects.add(project)
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={"q": "the", "type": "file"},
)
assert len(results) > 0
for result in results:
assert result["project"] == {"alias": None, "slug": "docs"}
@override_settings(ALLOW_PRIVATE_REPOS=True)
def test_search_no_owned_projects(self, client, all_projects):
user = get(User)
assert user.projects.all().count() == 0
client.force_login(user)
results, _ = self._get_search_result(
url=self.url,
client=client,
# Search for the most common english word.
search_params={"q": "the", "type": "file"},
)
assert len(results) == 0
| TestPageSearch |
python | pypa__warehouse | tests/unit/test_forms.py | {
"start": 287,
"end": 1074
} | class ____:
@pytest.mark.parametrize(
"uri",
[
"https://example.com/",
"http://example.com/",
"https://sub.example.com/path?query#thing",
],
)
def test_valid(self, uri):
URIValidator()(pretend.stub(), pretend.stub(data=uri))
@pytest.mark.parametrize(
"uri", ["javascript:alert(0)", "UNKNOWN", "ftp://example.com/"]
)
def test_invalid(self, uri):
validator = URIValidator()
with pytest.raises(ValidationError):
validator(pretend.stub(), pretend.stub(data=uri))
def test_plain_schemes(self):
validator = URIValidator(require_scheme=True, allowed_schemes=[])
validator(pretend.stub(), pretend.stub(data="ftp://example.com/"))
| TestURIValidator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 884566,
"end": 884956
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PushAllowance", graphql_name="node")
"""The item at the end of the edge."""
| PushAllowanceEdge |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial003_py39.py | {
"start": 519,
"end": 747
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
hero_links: list[HeroTeamLink] = Relationship(back_populates="team")
| Team |
python | huggingface__transformers | tests/generation/test_candidate_generator.py | {
"start": 9891,
"end": 15054
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.target_name = "hf-internal-testing/tiny-random-LlamaForCausalLM"
cls.assistant_name = "hf-internal-testing/tiny-random-PhiForCausalLM"
def setUp(self):
self.target_tokenizer = AutoTokenizer.from_pretrained(self.target_name)
self.target_config = AutoConfig.from_pretrained(self.target_name)
self.assistant_model = AutoModelForCausalLM.from_pretrained(self.assistant_name).to(torch_device)
self.assistant_tokenizer = AutoTokenizer.from_pretrained(self.assistant_name)
self.generation_config = GenerationConfig()
# Ensure required tokens exist
if self.target_tokenizer.pad_token_id is None:
self.target_tokenizer.pad_token_id = self.target_tokenizer.eos_token_id
if self.target_tokenizer.bos_token_id is None:
self.target_tokenizer.bos_token_id = self.target_tokenizer.eos_token_id
if self.assistant_tokenizer.pad_token_id is None:
self.assistant_tokenizer.pad_token_id = self.assistant_tokenizer.eos_token_id
if self.assistant_tokenizer.bos_token_id is None:
self.assistant_tokenizer.bos_token_id = self.assistant_tokenizer.eos_token_id
self.input_ids = torch.tensor([[1, 2, 3]]).to(torch_device)
self.model_kwargs = {
"attention_mask": torch.ones_like(self.input_ids).to(torch_device),
}
atm_translator = AssistantVocabTranslatorCache.get_translator(
target_tokenizer=self.target_tokenizer,
assistant_tokenizer=self.assistant_tokenizer,
assistant_model=self.assistant_model,
target_vocab_size=self.target_config.vocab_size,
)
self.generator = UniversalSpeculativeDecodingGenerator(
input_ids=self.input_ids,
assistant_model=self.assistant_model,
target_tokenizer=self.target_tokenizer,
assistant_tokenizer=self.assistant_tokenizer,
generation_config=self.generation_config,
model_kwargs=self.model_kwargs,
atm_translator=atm_translator,
)
def test_basic_generation(self):
"""Test basic speculative decoding works"""
input_text = "The quick brown fox"
input_ids = self.target_tokenizer.encode(input_text, return_tensors="pt")
self.generator.input_ids = input_ids
candidates, scores = self.generator.get_candidates(input_ids)
self.assertIsNotNone(candidates)
self.assertIsNotNone(scores)
self.assertTrue(torch.is_tensor(candidates))
self.assertTrue(torch.is_tensor(scores))
def test_mismatched_vocabularies(self):
"""Test handling of mismatched vocabularies between models"""
# Create input with tokens present in main but not assistant vocab
# Find a token that is not in the assistant tokenizer but in
# the main tokenizer.
missing_token = next(
token
for token in self.target_tokenizer.get_vocab()
if token not in self.assistant_tokenizer.get_vocab()
and token not in self.target_tokenizer.all_special_tokens
and "reserved_" not in token
)
input_ids = torch.tensor([[self.target_tokenizer.convert_tokens_to_ids(missing_token)]])
self.generator.input_ids = input_ids
candidates, _ = self.generator.get_candidates(input_ids)
self.assertIsNotNone(candidates)
def test_speculation_depth(self):
"""Test different speculation depths"""
input_ids = self.target_tokenizer.encode("Test text", return_tensors="pt")
self.generator.input_ids = input_ids
for depth in [1, 8, 17]:
self.generator.num_assistant_tokens = depth
candidates, _ = self.generator.get_candidates(input_ids)
self.assertLessEqual(candidates.shape[1] - input_ids.shape[1], depth)
def test_device_consistency(self):
"""Test handling of inputs on different devices"""
input_ids = torch.tensor([[1, 2, 3]]).to(torch_device)
self.generator.input_ids = input_ids
candidates, _ = self.generator.get_candidates(input_ids)
self.assertEqual(candidates.device, input_ids.device)
def test_usd_vs_vanilla_sampling(cls):
"""Test that USD matches vanilla sampling with temperature set to nearly 0"""
prompt = "Test text"
pipe_vanilla = pipeline(
"text-generation",
model=cls.target_name,
)
pipe_vanilla_output = pipe_vanilla(prompt, max_new_tokens=5, do_sample=False)
vanilla_text = pipe_vanilla_output[0]["generated_text"]
pipe_usd = pipeline(
"text-generation",
model=cls.target_name,
assistant_model=cls.assistant_name,
)
pipe_usd_output = pipe_usd(prompt, max_new_tokens=5, do_sample=True, temperature=1e-9) # Nearly 0 temperature
usd_text = pipe_usd_output[0]["generated_text"]
# Assert that the outputs match
cls.assertEqual(usd_text, vanilla_text)
| TestUniversalSpeculativeDecoding |
python | qdrant__qdrant-client | tools/async_client_generator/transformers/local/call_transformer.py | {
"start": 126,
"end": 669
} | class ____(CallTransformer):
def visit_Call(self, node: ast.Call) -> Union[ast.AST, ast.Await]:
if isinstance(node.func, ast.Name):
if node.func.id in self.class_replace_map:
node.func.id = self.class_replace_map[node.func.id]
if isinstance(node.func, ast.Attribute):
if node.func.attr in self.async_methods:
if getattr(node.func.value, "id", None) == "self":
return ast.Await(value=node)
return self.generic_visit(node)
| LocalCallTransformer |
python | pytorch__pytorch | torch/_inductor/utils.py | {
"start": 47890,
"end": 52804
} | class ____:
tabwidth = 4
def __init__(self, initial_indent: int = 0) -> None:
self._lines: list[Union[DeferredLineBase, LineContext, str]] = []
self._indent = initial_indent
@contextlib.contextmanager
def set_tabwidth(self, tabwidth: int) -> Iterator[None]:
prev = self.tabwidth
try:
self.tabwidth = tabwidth
yield
finally:
self.tabwidth = prev
def getvaluewithlinemap(self) -> ValueWithLineMap:
buf = StringIO()
p = 1
linemap: list[tuple[int, LineContext]] = []
for li in self._lines:
if isinstance(li, DeferredLineBase):
line = li()
if line is None:
continue
elif isinstance(li, LineContext):
linemap.append((p, li.context))
continue
else:
line = li
assert isinstance(line, str)
buf.write(line)
buf.write("\n")
p += 1 + line.count("\n")
return ValueWithLineMap(buf.getvalue(), linemap)
def getvalue(self) -> str:
return self.getvaluewithlinemap().value
def getrawvalue(self) -> str:
buf = StringIO()
for li in self._lines:
if isinstance(li, DeferredLineBase):
line = li()
if line is None:
continue
elif isinstance(li, LineContext):
continue
else:
line = li
assert isinstance(line, str)
# backslash implies line continuation
if line.endswith("\\"):
buf.write(line[:-1])
else:
buf.write(line)
buf.write("\n")
return buf.getvalue()
def clear(self) -> None:
self._lines.clear()
def __bool__(self) -> bool:
return bool(self._lines)
def prefix(self) -> str:
return " " * (self._indent * self.tabwidth)
def newline(self) -> None:
self.writeline("\n")
def writeline(self, line: Union[LineContext, DeferredLineBase, str]) -> None:
if isinstance(line, LineContext):
self._lines.append(line)
elif isinstance(line, DeferredLineBase):
self._lines.append(line.with_prefix(self.prefix()))
elif line.strip():
self._lines.append(f"{self.prefix()}{line}")
else:
self._lines.append("")
def writelines(
self, lines: Sequence[Union[LineContext, DeferredLineBase, str]]
) -> None:
for line in lines:
self.writeline(line)
def indent(self, offset: int = 1) -> contextlib.AbstractContextManager[None]:
@contextlib.contextmanager
def ctx() -> Iterator[None]:
self._indent += offset
try:
yield
finally:
self._indent -= offset
return ctx()
def do_indent(self, offset: int = 1) -> None:
self._indent += offset
def do_unindent(self, offset: int = 1) -> None:
self._indent -= offset
def splice(
self, other_code: Union[IndentedBuffer, str], strip: bool = False
) -> None:
if isinstance(other_code, IndentedBuffer):
dedent = float("inf")
# pyrefly: ignore [bad-assignment]
for line in other_code._lines:
if not isinstance(line, LineContext) and line:
dedent = min(dedent, len(line) - len(line.lstrip()))
if math.isinf(dedent):
dedent = 0
for line in other_code._lines:
if isinstance(line, LineContext):
self._lines.append(line)
else:
IndentedBuffer.writeline(self, line[int(dedent) :])
else:
other_code = textwrap.dedent(other_code)
if strip:
other_code = other_code.lstrip()
if not other_code:
return
other_code = other_code.rstrip()
for s in other_code.split("\n"):
self.writeline(s)
def map(self, func: Callable[[Any], Any]) -> IndentedBuffer:
res = IndentedBuffer(initial_indent=self._indent)
res._lines = [func(line) for line in self._lines]
return res
def __repr__(self) -> str:
return f"{type(self)}({self.getvalue()})"
def __add__(self, other: Self) -> IndentedBuffer:
assert self._indent == other._indent
res = IndentedBuffer(initial_indent=self._indent)
# TODO(rec): or should this be self.__class__(initial_indent=self._indent)?
res.writelines(self._lines)
res.writelines(other._lines)
return res
def contains(self, new_line: Union[DeferredLineBase, LineContext, str]) -> bool:
return new_line in self._lines
| IndentedBuffer |
python | squidfunk__mkdocs-material | material/utilities/filter/config.py | {
"start": 1395,
"end": 1947
} | class ____(Config):
"""
A filter configuration.
"""
include = ListOfItems(Type(str), default = [])
"""
Patterns to include.
This list contains patterns that are matched against the value to filter.
If the value matches at least one pattern, it will be included.
"""
exclude = ListOfItems(Type(str), default = [])
"""
Patterns to exclude.
This list contains patterns that are matched against the value to filter.
If the value matches at least one pattern, it will be excluded.
"""
| FilterConfig |
python | walkccc__LeetCode | solutions/707. Design Linked List/707.py | {
"start": 107,
"end": 1216
} | class ____:
def __init__(self):
self.length = 0
self.dummy = ListNode(0)
def get(self, index: int) -> int:
if index < 0 or index >= self.length:
return -1
curr = self.dummy.next
for _ in range(index):
curr = curr.next
return curr.val
def addAtHead(self, val: int) -> None:
curr = self.dummy.next
self.dummy.next = ListNode(val)
self.dummy.next.next = curr
self.length += 1
def addAtTail(self, val: int) -> None:
curr = self.dummy
while curr.next:
curr = curr.next
curr.next = ListNode(val)
self.length += 1
def addAtIndex(self, index: int, val: int) -> None:
if index > self.length:
return
curr = self.dummy
for _ in range(index):
curr = curr.next
temp = curr.next
curr.next = ListNode(val)
curr.next.next = temp
self.length += 1
def deleteAtIndex(self, index: int) -> None:
if index < 0 or index >= self.length:
return
curr = self.dummy
for _ in range(index):
curr = curr.next
temp = curr.next
curr.next = temp.next
self.length -= 1
| MyLinkedList |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py | {
"start": 6870,
"end": 7107
} | class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI Ray Cluster List link."""
name = "Ray Cluster List"
key = "ray_cluster_list_conf"
format_str = VERTEX_AI_RAY_CLUSTER_LIST_LINK
| VertexAIRayClusterListLink |
python | huggingface__transformers | src/transformers/models/phi/modular_phi.py | {
"start": 12411,
"end": 12601
} | class ____(LlamaForCausalLM):
def __init__(self, config):
super().__init__(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
| PhiForCausalLM |
python | viewflow__viewflow | tests/components/test_field_date.py | {
"start": 298,
"end": 1353
} | class ____(LiveTestCase):
def test_field_date(self):
self.browser.get(f"{self.live_server_url}/application/form/")
self.assertNoJsErrors()
input = self.browser.find_element(By.CSS_SELECTOR, "vf-field-date input")
label = self.browser.find_element(By.CSS_SELECTOR, "vf-field-date label")
label_classes = label.get_attribute("class").split(" ")
self.assertNotIn("mdc-text-field--float-above", label_classes)
input.click()
label_classes = label.get_attribute("class").split(" ")
self.assertIn("mdc-text-field--focused", label_classes)
self.assertIn("mdc-text-field--label-floating", label_classes)
self.assertNoJsErrors()
button = self.browser.find_element(By.CSS_SELECTOR, "vf-field-date button")
button.click()
calendar_button = self.browser.find_element(
By.CSS_SELECTOR, '.vf-calendar button[data-mdc-dialog-action="accept"]'
)
calendar_button.click()
self.assertTrue(input.get_property("value"))
| Test |
python | pypa__packaging | src/packaging/metadata.py | {
"start": 10135,
"end": 20022
} | class ____(email.message.EmailMessage):
"""
This is :class:`email.message.EmailMessage` with two small changes: it defaults to
our `RFC822Policy`, and it correctly writes unicode when being called
with `bytes()`.
"""
def __init__(self) -> None:
super().__init__(policy=RFC822Policy())
def as_bytes(
self, unixfrom: bool = False, policy: email.policy.Policy | None = None
) -> bytes:
"""
Return the bytes representation of the message.
This handles unicode encoding.
"""
return self.as_string(unixfrom, policy=policy).encode("utf-8")
def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:
"""Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).
This function returns a two-item tuple of dicts. The first dict is of
recognized fields from the core metadata specification. Fields that can be
parsed and translated into Python's built-in types are converted
appropriately. All other fields are left as-is. Fields that are allowed to
appear multiple times are stored as lists.
The second dict contains all other fields from the metadata. This includes
any unrecognized fields. It also includes any fields which are expected to
be parsed into a built-in type but were not formatted appropriately. Finally,
any fields that are expected to appear only once but are repeated are
included in this dict.
"""
raw: dict[str, str | list[str] | dict[str, str]] = {}
unparsed: dict[str, list[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
else:
parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
# We have to wrap parsed.keys() in a set, because in the case of multiple
# values for a key (a list), the key will appear multiple times in the
# list of keys, but we're avoiding that by using get_all().
for name_with_case in frozenset(parsed.keys()):
# Header names in RFC are case insensitive, so we'll normalize to all
# lower case to make comparisons easier.
name = name_with_case.lower()
# We use get_all() here, even for fields that aren't multiple use,
# because otherwise someone could have e.g. two Name fields, and we
# would just silently ignore it rather than doing something about it.
headers = parsed.get_all(name) or []
# The way the email module works when parsing bytes is that it
# unconditionally decodes the bytes as ascii using the surrogateescape
# handler. When you pull that data back out (such as with get_all() ),
# it looks to see if the str has any surrogate escapes, and if it does
# it wraps it in a Header object instead of returning the string.
#
# As such, we'll look for those Header objects, and fix up the encoding.
value = []
# Flag if we have run into any issues processing the headers, thus
# signalling that the data belongs in 'unparsed'.
valid_encoding = True
for h in headers:
# It's unclear if this can return more types than just a Header or
# a str, so we'll just assert here to make sure.
assert isinstance(h, (email.header.Header, str))
# If it's a header object, we need to do our little dance to get
# the real data out of it. In cases where there is invalid data
# we're going to end up with mojibake, but there's no obvious, good
# way around that without reimplementing parts of the Header object
# ourselves.
#
# That should be fine since, if mojibacked happens, this key is
# going into the unparsed dict anyways.
if isinstance(h, email.header.Header):
# The Header object stores it's data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: list[tuple[bytes, str | None]] = []
for binary, _encoding in email.header.decode_header(h):
try:
binary.decode("utf8", "strict")
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
valid_encoding = False
else:
encoding = "utf8"
chunks.append((binary, encoding))
# Turn our chunks back into a Header object, then let that
# Header object do the right thing to turn them into a
# string for us.
value.append(str(email.header.make_header(chunks)))
# This is already a string, so just add it.
else:
value.append(h)
# We've processed all of our values to get them into a list of str,
# but we may have mojibake data, in which case this is an unparsed
# field.
if not valid_encoding:
unparsed[name] = value
continue
raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
if raw_name is None:
# This is a bit of a weird situation, we've encountered a key that
# we don't know what it means, so we don't know whether it's meant
# to be a list or not.
#
# Since we can't really tell one way or another, we'll just leave it
# as a list, even though it may be a single item list, because that's
# what makes the most sense for email headers.
unparsed[name] = value
continue
# If this is one of our string fields, then we'll check to see if our
# value is a list of a single item. If it is then we'll assume that
# it was emitted as a single string, and unwrap the str from inside
# the list.
#
# If it's any other kind of data, then we haven't the faintest clue
# what we should parse it as, and we have to just add it to our list
# of unparsed stuff.
if raw_name in _STRING_FIELDS and len(value) == 1:
raw[raw_name] = value[0]
# If this is import_names, we need to special case the empty field
# case, which converts to an empty list instead of None. We can't let
# the empty case slip through, as it will fail validation.
elif raw_name == "import_names" and value == [""]:
raw[raw_name] = []
# If this is one of our list of string fields, then we can just assign
# the value, since email *only* has strings, and our get_all() call
# above ensures that this is a list.
elif raw_name in _LIST_FIELDS:
raw[raw_name] = value
# Special Case: Keywords
# The keywords field is implemented in the metadata spec as a str,
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
# specially-formatted strings that represent a key and a value, which
# is fundamentally a mapping, however the email format doesn't support
# mappings in a sane way, so it was crammed into a list of strings
# instead.
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
unparsed[name] = value
# Nothing that we've done has managed to parse this, so it'll just
# throw it in our unparsable data and move on.
else:
unparsed[name] = value
# We need to support getting the Description from the message payload in
# addition to getting it from the the headers. This does mean, though, there
# is the possibility of it being set both ways, in which case we put both
# in 'unparsed' since we don't know which is right.
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes)) # type: ignore[call-overload]
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparsable.
if "description" in raw:
description_header = cast("str", raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
)
elif "description" in unparsed:
unparsed["description"].append(payload)
else:
raw["description"] = payload
# We need to cast our `raw` to a metadata, because a TypedDict only support
# literal key names, but we're computing our key names on purpose, but the
# way this function is implemented, our `TypedDict` can only have valid key
# names.
return cast("RawMetadata", raw), unparsed
_NOT_FOUND = object()
# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4", "2.5"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4", "2.5"]
_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])
| RFC822Message |
python | scipy__scipy | scipy/stats/tests/test_hypotests.py | {
"start": 68326,
"end": 72969
} | class ____:
@pytest.mark.parametrize('args', [([], np.arange(5)),
(np.arange(5), [1])])
@pytest.mark.skip_xp_backends("jax.numpy", reason="lazy -> no axis_nan_policy")
def test_too_small_input(self, args, xp):
args = (xp.asarray(arg, dtype=xp_default_dtype(xp)) for arg in args)
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
res = cramervonmises_2samp(*args)
xp_assert_equal(res.statistic, xp.asarray(xp.nan))
xp_assert_equal(res.pvalue, xp.asarray(xp.nan))
def test_invalid_input(self, xp):
y = xp.arange(5)
msg = 'method must be either auto, exact or asymptotic'
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp(y, y, 'xyz')
def test_list_input(self): # list input only relevant for NumPy
x = [2, 3, 4, 7, 6]
y = [0.2, 0.7, 12, 18]
r1 = cramervonmises_2samp(x, y)
r2 = cramervonmises_2samp(np.array(x), np.array(y))
assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
@pytest.mark.parametrize('dtype', [None, 'float32', 'float64'])
def test_example_conover(self, dtype, xp):
# Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
# Statistics, 1971.
if is_numpy(xp) and xp.__version__ < "2.0" and dtype == 'float32':
pytest.skip("Pre-NEP 50 doesn't respect dtypes")
dtype = xp_default_dtype(xp) if dtype is None else getattr(xp, dtype)
x = xp.asarray([7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2], dtype=dtype)
y = xp.asarray([5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8,
10.8, 11.3, 11.5, 12.3, 12.5, 13.4, 14.6], dtype=dtype)
r = cramervonmises_2samp(x, y)
xp_assert_close(r.statistic, xp.asarray(0.262, dtype=dtype), atol=1e-3)
xp_assert_close(r.pvalue, xp.asarray(.18, dtype=dtype), atol=1e-2)
@pytest.mark.parametrize('statistic, m, n, pval',
[(710, 5, 6, 48./462),
(1897, 7, 7, 117./1716),
(576, 4, 6, 2./210),
(1764, 6, 7, 2./1716)])
def test_exact_pvalue(self, statistic, m, n, pval): # only implemented w/ NumPy
# the exact values are taken from Anderson: On the distribution of the
# two-sample Cramer-von-Mises criterion, 1962.
# The values are taken from Table 2, 3, 4 and 5
assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval)
@pytest.mark.xslow
def test_large_sample(self, xp):
# for large samples, the statistic U gets very large
# do a sanity check that p-value is not 0, 1 or nan
rng = np.random.default_rng(4367)
x = distributions.norm.rvs(size=1000000, random_state=rng)
y = distributions.norm.rvs(size=900000, random_state=rng)
x, y = xp.asarray(x), xp.asarray(y)
r = cramervonmises_2samp(x, y)
assert 0 < r.pvalue < 1
r = cramervonmises_2samp(x, y+0.1)
assert 0 < r.pvalue < 1
def test_exact_vs_asymptotic(self, xp):
rng = np.random.RandomState(0)
x = xp.asarray(rng.random(7))
y = xp.asarray(rng.random(8))
r1 = cramervonmises_2samp(x, y, method='exact')
r2 = cramervonmises_2samp(x, y, method='asymptotic')
xp_assert_equal(r1.statistic, r2.statistic)
xp_assert_close(r1.pvalue, r2.pvalue, atol=1e-2)
def test_method_auto(self, xp):
x = xp.arange(20.)
y = xp.asarray([0.5, 4.7, 13.1])
r1 = cramervonmises_2samp(x, y, method='exact')
r2 = cramervonmises_2samp(x, y, method='auto')
xp_assert_equal(r1.pvalue, r2.pvalue)
# switch to asymptotic if one sample has more than 20 observations
x = xp.arange(21.)
r1 = cramervonmises_2samp(x, y, method='asymptotic')
r2 = cramervonmises_2samp(x, y, method='auto')
xp_assert_equal(r1.pvalue, r2.pvalue)
def test_same_input(self, xp):
# make sure trivial edge case can be handled
# note that _cdf_cvm_inf(0) = nan. implementation avoids nan by
# returning pvalue=1 for very small values of the statistic
x = xp.arange(15)
res = cramervonmises_2samp(x, x)
xp_assert_equal(res.statistic, xp.asarray(0.))
xp_assert_equal(res.pvalue, xp.asarray(1.))
# check exact p-value
res = cramervonmises_2samp(x[:4], x[:4])
xp_assert_equal(res.statistic, xp.asarray(0.))
xp_assert_equal(res.pvalue, xp.asarray(1.))
| TestCvm_2samp |
python | getsentry__sentry | src/sentry/users/models/identity.py | {
"start": 6275,
"end": 7203
} | class ____(Model):
"""
A verified link between a user and a third party identity.
"""
__relocation_scope__ = RelocationScope.Excluded
idp = FlexibleForeignKey("sentry.IdentityProvider")
user = FlexibleForeignKey(settings.AUTH_USER_MODEL)
external_id = models.TextField()
data = models.JSONField(default=dict)
status = BoundedPositiveIntegerField(default=IdentityStatus.UNKNOWN)
scopes = ArrayField(models.TextField(), default=list)
date_verified = models.DateTimeField(default=timezone.now)
date_added = models.DateTimeField(default=timezone.now)
objects: ClassVar[IdentityManager] = IdentityManager()
class Meta:
app_label = "sentry"
db_table = "sentry_identity"
unique_together = (("idp", "external_id"), ("idp", "user"))
def get_provider(self) -> Provider:
from sentry.identity import get
return get(self.idp.type)
| Identity |
python | openai__openai-python | src/openai/types/beta/threads/runs/function_tool_call_delta.py | {
"start": 240,
"end": 648
} | class ____(BaseModel):
arguments: Optional[str] = None
"""The arguments passed to the function."""
name: Optional[str] = None
"""The name of the function."""
output: Optional[str] = None
"""The output of the function.
This will be `null` if the outputs have not been
[submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
yet.
"""
| Function |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 88572,
"end": 91880
} | class ____(fixtures.MappedTest):
"""test usages stated at
https://article.gmane.org/gmane.comp.python.sqlalchemy.user/3085
https://article.gmane.org/gmane.comp.python.sqlalchemy.user/3119
"""
@classmethod
def define_tables(cls, metadata):
Table(
"order",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table(
"item",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"order_id", Integer, ForeignKey("order.id"), nullable=False
),
)
Table(
"attribute",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("item_id", Integer, ForeignKey("item.id"), nullable=False),
)
@classmethod
def setup_classes(cls):
class Order(cls.Comparable):
pass
class Item(cls.Comparable):
pass
class Attribute(cls.Comparable):
pass
def test_singlelevel_remove(self):
item, Order, order, Item = (
self.tables.item,
self.classes.Order,
self.tables.order,
self.classes.Item,
)
self.mapper_registry.map_imperatively(
Order,
order,
properties={
"items": relationship(Item, cascade="all, delete-orphan")
},
)
self.mapper_registry.map_imperatively(Item, item)
s = fixture_session()
o1 = Order()
s.add(o1)
i1 = Item()
o1.items.append(i1)
o1.items.remove(i1)
s.commit()
assert i1 not in o1.items
def test_multilevel_remove(self):
Item, Attribute, order, item, attribute, Order = (
self.classes.Item,
self.classes.Attribute,
self.tables.order,
self.tables.item,
self.tables.attribute,
self.classes.Order,
)
self.mapper_registry.map_imperatively(
Order,
order,
properties={
"items": relationship(Item, cascade="all, delete-orphan")
},
)
self.mapper_registry.map_imperatively(
Item,
item,
properties={
"attributes": relationship(
Attribute, cascade="all, delete-orphan"
)
},
)
self.mapper_registry.map_imperatively(Attribute, attribute)
s = fixture_session()
o1 = Order()
s.add(o1)
i1 = Item()
a1 = Attribute()
i1.attributes.append(a1)
o1.items.append(i1)
assert i1 in s
assert a1 in s
# i1 is an orphan so the operation
# removes 'i1'. The operation
# cascades down to 'a1'.
o1.items.remove(i1)
assert i1 not in s
assert a1 not in s
s.commit()
assert o1 in s
assert a1 not in s
assert i1 not in s
assert a1 not in o1.items
| PendingOrphanTestTwoLevel |
python | TheAlgorithms__Python | graphs/boruvka.py | {
"start": 1280,
"end": 6405
} | class ____:
def __init__(self, num_of_nodes: int) -> None:
"""
Arguments:
num_of_nodes - the number of nodes in the graph
Attributes:
m_num_of_nodes - the number of nodes in the graph.
m_edges - the list of edges.
m_component - the dictionary which stores the index of the component which
a node belongs to.
"""
self.m_num_of_nodes = num_of_nodes
self.m_edges: list[list[int]] = []
self.m_component: dict[int, int] = {}
def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
"""Adds an edge in the format [first, second, edge weight] to graph."""
self.m_edges.append([u_node, v_node, weight])
def find_component(self, u_node: int) -> int:
"""Propagates a new component throughout a given component."""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def set_component(self, u_node: int) -> None:
"""Finds the component index of a given node"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
self.m_component[k] = self.find_component(k)
def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
"""Union finds the roots of components for two nodes, compares the components
in terms of size, and attaches the smaller one to the larger one to form
single component"""
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(u_node)
elif component_size[u_node] >= component_size[v_node]:
self.m_component[v_node] = self.find_component(u_node)
component_size[u_node] += component_size[v_node]
self.set_component(v_node)
def boruvka(self) -> None:
"""Performs Borůvka's algorithm to find MST."""
# Initialize additional lists required to algorithm.
component_size = []
mst_weight = 0
minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
num_of_components = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
"""If the current minimum weight edge of component u doesn't
exist (is -1), or if it's greater than the edge we're
observing right now, we will assign the value of the edge
we're observing to it.
If the current minimum weight edge of component v doesn't
exist (is -1), or if it's greater than the edge we're
observing right now, we will assign the value of the edge
we're observing to it"""
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
minimum_weight_edge[component] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(edge, list):
u, v, w = edge
u_component = self.m_component[u]
v_component = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(component_size, u_component, v_component)
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
num_of_components -= 1
minimum_weight_edge = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
"""
>>> g = Graph(8)
>>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
... (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)):
... g.add_edge(*u_v_w)
>>> g.boruvka()
Added edge [0 - 3]
Added weight: 5
<BLANKLINE>
Added edge [0 - 1]
Added weight: 10
<BLANKLINE>
Added edge [2 - 3]
Added weight: 4
<BLANKLINE>
Added edge [4 - 7]
Added weight: 5
<BLANKLINE>
Added edge [4 - 5]
Added weight: 10
<BLANKLINE>
Added edge [6 - 7]
Added weight: 4
<BLANKLINE>
Added edge [3 - 4]
Added weight: 8
<BLANKLINE>
The total weight of the minimal spanning tree is: 46
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| Graph |
python | eventlet__eventlet | eventlet/zipkin/_thrift/zipkinCore/ttypes.py | {
"start": 349,
"end": 722
} | class ____:
BOOL = 0
BYTES = 1
I16 = 2
I32 = 3
I64 = 4
DOUBLE = 5
STRING = 6
_VALUES_TO_NAMES = {
0: "BOOL",
1: "BYTES",
2: "I16",
3: "I32",
4: "I64",
5: "DOUBLE",
6: "STRING",
}
_NAMES_TO_VALUES = {
"BOOL": 0,
"BYTES": 1,
"I16": 2,
"I32": 3,
"I64": 4,
"DOUBLE": 5,
"STRING": 6,
}
| AnnotationType |
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 9564,
"end": 10793
} | class ____(nn.Module):
def __init__(
self,
config: MobileViTV2Config,
embed_dim: int,
ffn_latent_dim: int,
ffn_dropout: float = 0.0,
) -> None:
super().__init__()
self.conv1 = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=ffn_latent_dim,
kernel_size=1,
stride=1,
bias=True,
use_normalization=False,
use_activation=True,
)
self.dropout1 = nn.Dropout(ffn_dropout)
self.conv2 = MobileViTV2ConvLayer(
config=config,
in_channels=ffn_latent_dim,
out_channels=embed_dim,
kernel_size=1,
stride=1,
bias=True,
use_normalization=False,
use_activation=False,
)
self.dropout2 = nn.Dropout(ffn_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv1(hidden_states)
hidden_states = self.dropout1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.dropout2(hidden_states)
return hidden_states
| MobileViTV2FFN |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax_regession_8119.py | {
"start": 459,
"end": 686
} | class ____(Coordinator[int | str]):
def __init__(self) -> None:
Coordinator.__init__(self, update_interval=2)
def _async_update_data(self):
assert self.update_interval
self.update_interval = 1
| Child |
python | pandas-dev__pandas | pandas/tests/frame/test_arithmetic.py | {
"start": 16723,
"end": 30088
} | class ____:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = Series(arr)
df = DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = Series(tdi)
df = DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = DataFrame({"A": dti, "B": ser})
other = DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = DataFrame(
{
"A": Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype={"C": None})
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self,
op,
int_frame,
mixed_int_frame,
mixed_float_frame,
switch_numexpr_min_elements,
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = {"B": "uint64", "C": None}
elif op in ["__add__", "__mul__"]:
dtype = {"C": None}
if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:
# when using numexpr, the casting rules are slightly different:
# in the `2 + mixed_int_frame` operation, int32 column becomes
# and int64 column (not preserving dtype in operation with Python
# scalar), and then the int32/int64 combo results in int64 result
dtype["A"] = (2 + mixed_int_frame)["A"].dtype
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype={"C": None})
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dim", range(3, 6))
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame, dim):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
expected = float_frame.sort_index() * np.nan
tm.assert_frame_equal(result, expected)
result = float_frame[:0].add(float_frame)
expected = float_frame.sort_index() * np.nan
tm.assert_frame_equal(result, expected)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
@pytest.mark.parametrize("op", ["add", "sub", "mul", "mod"])
def test_arith_flex_series_ops(self, simple_frame, op):
# after arithmetic refactor, add truediv here
df = simple_frame
row = df.xs("a")
col = df["two"]
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
def test_arith_flex_series_broadcasting(self, any_real_numpy_dtype):
# broadcasting issue in GH 7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype=any_real_numpy_dtype)
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
if any_real_numpy_dtype == "float32":
expected = expected.astype(any_real_numpy_dtype)
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = Series([], dtype=object)
df_len0 = DataFrame(columns=["A", "B"])
df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_sub_alignment_with_duplicate_index(self):
# GH#5185 dup aligning operations should work
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"])
def test_arithmetic_with_duplicate_columns(self, op):
# operations
df = DataFrame({"A": np.arange(10), "B": np.random.default_rng(2).random(10)})
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
result = getattr(df, op)(df)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", [0, None])
def test_broadcast_multiindex(self, level):
# GH34388
df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]})
df1.columns = df1.columns.set_names("L1")
df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]})
df2.columns = df2.columns.set_names(["L1", "L2"])
result = df1.add(df2, level=level)
expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]})
expected.columns = expected.columns.set_names(["L1", "L2"])
tm.assert_frame_equal(result, expected)
def test_frame_multiindex_operations(self):
# GH 43321
df = DataFrame(
{2010: [1, 2, 3], 2020: [3, 4, 5]},
index=MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
series = Series(
[0.4],
index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]),
)
expected = DataFrame(
{2010: [1.4, 2.4, 3.4], 2020: [3.4, 4.4, 5.4]},
index=MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
result = df.add(series, axis=0)
tm.assert_frame_equal(result, expected)
def test_frame_multiindex_operations_series_index_to_frame_index(self):
# GH 43321
df = DataFrame(
{2010: [1], 2020: [3]},
index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]),
)
series = Series(
[10.0, 20.0, 30.0],
index=MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
expected = DataFrame(
{2010: [11.0, 21, 31.0], 2020: [13.0, 23.0, 33.0]},
index=MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
result = df.add(series, axis=0)
tm.assert_frame_equal(result, expected)
def test_frame_multiindex_operations_no_align(self):
df = DataFrame(
{2010: [1, 2, 3], 2020: [3, 4, 5]},
index=MultiIndex.from_product(
[["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
),
)
series = Series(
[0.4],
index=MultiIndex.from_product([["c"], ["a"]], names=["mod", "scen"]),
)
expected = DataFrame(
{2010: np.nan, 2020: np.nan},
index=MultiIndex.from_tuples(
[
("a", "b", 0),
("a", "b", 1),
("a", "b", 2),
("a", "c", np.nan),
],
names=["scen", "mod", "id"],
),
)
result = df.add(series, axis=0)
tm.assert_frame_equal(result, expected)
def test_frame_multiindex_operations_part_align(self):
df = DataFrame(
{2010: [1, 2, 3], 2020: [3, 4, 5]},
index=MultiIndex.from_tuples(
[
("a", "b", 0),
("a", "b", 1),
("a", "c", 2),
],
names=["scen", "mod", "id"],
),
)
series = Series(
[0.4],
index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]),
)
expected = DataFrame(
{2010: [1.4, 2.4, np.nan], 2020: [3.4, 4.4, np.nan]},
index=MultiIndex.from_tuples(
[
("a", "b", 0),
("a", "b", 1),
("a", "c", 2),
],
names=["scen", "mod", "id"],
),
)
result = df.add(series, axis=0)
tm.assert_frame_equal(result, expected)
def test_frame_multiindex_operations_part_align_axis1(self):
# GH#61009 Test DataFrame-Series arithmetic operation
# with partly aligned MultiIndex and axis = 1
df = DataFrame(
[[1, 2, 3], [3, 4, 5]],
index=[2010, 2020],
columns=MultiIndex.from_tuples(
[
("a", "b", 0),
("a", "b", 1),
("a", "c", 2),
],
names=["scen", "mod", "id"],
),
)
series = Series(
[0.4],
index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]),
)
expected = DataFrame(
[[1.4, 2.4, np.nan], [3.4, 4.4, np.nan]],
index=[2010, 2020],
columns=MultiIndex.from_tuples(
[
("a", "b", 0),
("a", "b", 1),
("a", "c", 2),
],
names=["scen", "mod", "id"],
),
)
result = df.add(series, axis=1)
tm.assert_frame_equal(result, expected)
| TestFrameFlexArithmetic |
python | Textualize__textual | src/textual/color.py | {
"start": 3454,
"end": 3803
} | class ____(Exception):
"""A color failed to parse.
Args:
message: The error message
suggested_color: A close color we can suggest.
"""
def __init__(self, message: str, suggested_color: str | None = None):
super().__init__(message)
self.suggested_color = suggested_color
@rich.repr.auto
| ColorParseError |
python | django__django | tests/generic_views/views.py | {
"start": 3675,
"end": 3784
} | class ____(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
| AuthorCreateRestricted |
python | django__django | django/db/models/fetch_modes.py | {
"start": 55,
"end": 235
} | class ____:
__slots__ = ()
track_peers = False
def fetch(self, fetcher, instance):
raise NotImplementedError("Subclasses must implement this method.")
| FetchMode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass3.py | {
"start": 630,
"end": 667
} | class ____:
name: str
@final
| BFinal |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 29839,
"end": 34199
} | class ____(KyutaiSpeechToTextAttention):
"""
KyutaiSpeechToText attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`KyutaiSpeechToTextAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
SDPA API.
"""
# Adapted from KyutaiSpeechToTextAttention.forward
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
logger.warning_once(
f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
"be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states, cache_position) # Ignore copy
key_states = self.k_proj(hidden_states, cache_position) # Ignore copy
value_states = self.v_proj(hidden_states, cache_position) # Ignore copy
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if self.rotary_emb is not None: # Ignore copy
cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = (
{"sin": sin, "cos": cos, "cache_position": cache_position}
if self.rotary_emb is not None
else {"cache_position": cache_position}
) # Ignore copy
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output, cache_position) # Ignore copy
return attn_output, None
KYUTAI_SPEECH_TO_TEXT_ATTENTION_CLASSES = {
"eager": KyutaiSpeechToTextAttention,
"flash_attention_2": KyutaiSpeechToTextFlashAttention2,
"sdpa": KyutaiSpeechToTextSdpaAttention,
}
| KyutaiSpeechToTextSdpaAttention |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 99416,
"end": 100198
} | class ____(IR):
"""Select a subset of columns from a dataframe."""
__slots__ = ()
_non_child = ("schema",)
def __init__(self, schema: Schema, df: IR):
self.schema = schema
self._non_child_args = (schema,)
self.children = (df,)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Projection")
def do_evaluate(
cls, schema: Schema, df: DataFrame, *, context: IRExecutionContext
) -> DataFrame:
"""Evaluate and return a dataframe."""
# This can reorder things.
columns = broadcast(
*(df.column_map[name] for name in schema),
target_length=df.num_rows,
stream=df.stream,
)
return DataFrame(columns, stream=df.stream)
| Projection |
python | django__django | django/contrib/auth/views.py | {
"start": 8983,
"end": 9252
} | class ____(PasswordContextMixin, TemplateView):
template_name = "registration/password_reset_done.html"
title = _("Password reset sent")
@method_decorator(
[login_not_required, sensitive_post_parameters(), never_cache], name="dispatch"
)
| PasswordResetDoneView |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_run.py | {
"start": 46635,
"end": 53322
} | class ____:
@pytest.mark.parametrize(
("dag_id", "run_id", "patch_body", "response_body", "note_data"),
[
(
DAG1_ID,
DAG1_RUN1_ID,
{"state": DagRunState.FAILED, "note": "new_note2"},
{"state": DagRunState.FAILED, "note": "new_note2"},
{"user_id": "test", "content": "new_note2"},
),
(
DAG1_ID,
DAG1_RUN2_ID,
{"state": DagRunState.SUCCESS},
{"state": DagRunState.SUCCESS, "note": None},
None,
),
(
DAG2_ID,
DAG2_RUN1_ID,
{"state": DagRunState.QUEUED},
{"state": DagRunState.QUEUED, "note": None},
None,
),
(
DAG1_ID,
DAG1_RUN1_ID,
{"note": "updated note"},
{"state": DagRunState.SUCCESS, "note": "updated note"},
{"user_id": "test", "content": "updated note"},
),
(
DAG1_ID,
DAG1_RUN2_ID,
{"note": "new note", "state": DagRunState.FAILED},
{"state": DagRunState.FAILED, "note": "new note"},
{"user_id": "test", "content": "new note"},
),
(
DAG1_ID,
DAG1_RUN2_ID,
{"note": None},
{"state": DagRunState.FAILED, "note": None},
{"user_id": "test", "content": None},
),
],
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_patch_dag_run(self, test_client, dag_id, run_id, patch_body, response_body, note_data, session):
response = test_client.patch(f"/dags/{dag_id}/dagRuns/{run_id}", json=patch_body)
assert response.status_code == 200
body = response.json()
assert body["dag_id"] == dag_id
assert body["dag_run_id"] == run_id
assert body.get("state") == response_body.get("state")
assert body.get("note") == response_body.get("note")
_check_dag_run_note(session, run_id, note_data)
_check_last_log(session, dag_id=dag_id, event="patch_dag_run", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.patch("/dags/dag_1/dagRuns/run_1", json={})
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.patch("/dags/dag_1/dagRuns/run_1", json={})
assert response.status_code == 403
@pytest.mark.parametrize(
("query_params", "patch_body", "response_body", "expected_status_code", "note_data"),
[
(
{"update_mask": ["state"]},
{"state": DagRunState.SUCCESS},
{"state": "success"},
200,
{"user_id": "not_test", "content": "test_note"},
),
(
{"update_mask": ["note"]},
{"state": DagRunState.FAILED, "note": "new_note1"},
{"note": "new_note1", "state": "success"},
200,
{"user_id": "test", "content": "new_note1"},
),
(
{},
{"state": DagRunState.FAILED, "note": "new_note2"},
{"note": "new_note2", "state": "failed"},
200,
{"user_id": "test", "content": "new_note2"},
),
(
{"update_mask": ["note"]},
{},
{"state": "success", "note": "test_note"},
200,
{"user_id": "not_test", "content": "test_note"},
),
(
{"update_mask": ["note"]},
{"note": None},
{"state": "success", "note": None},
200,
{"user_id": "test", "content": None},
),
(
{"update_mask": ["random"]},
{"state": DagRunState.FAILED},
{"state": "success", "note": "test_note"},
200,
{"user_id": "not_test", "content": "test_note"},
),
],
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_patch_dag_run_with_update_mask(
self, test_client, query_params, patch_body, response_body, expected_status_code, note_data, session
):
response = test_client.patch(
f"/dags/{DAG1_ID}/dagRuns/{DAG1_RUN1_ID}",
params=query_params,
json=patch_body,
)
response_json = response.json()
assert response.status_code == expected_status_code
for key, value in response_body.items():
assert response_json.get(key) == value
_check_dag_run_note(session, DAG1_RUN1_ID, note_data)
def test_patch_dag_run_not_found(self, test_client):
response = test_client.patch(
f"/dags/{DAG1_ID}/dagRuns/invalid",
json={"state": DagRunState.SUCCESS},
)
assert response.status_code == 404
body = response.json()
assert body["detail"] == "The DagRun with dag_id: `test_dag1` and run_id: `invalid` was not found"
def test_patch_dag_run_bad_request(self, test_client):
response = test_client.patch(f"/dags/{DAG1_ID}/dagRuns/{DAG1_RUN1_ID}", json={"state": "running"})
assert response.status_code == 422
body = response.json()
assert body["detail"][0]["msg"] == "Input should be 'queued', 'success' or 'failed'"
@pytest.fixture(autouse=True)
def clean_listener_manager(self):
get_listener_manager().clear()
yield
get_listener_manager().clear()
@pytest.mark.parametrize(
("state", "listener_state"),
[
("queued", []),
("success", [DagRunState.SUCCESS]),
("failed", [DagRunState.FAILED]),
],
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_patch_dag_run_notifies_listeners(self, test_client, state, listener_state):
from unit.listeners.class_listener import ClassBasedListener
listener = ClassBasedListener()
get_listener_manager().add_listener(listener)
response = test_client.patch(f"/dags/{DAG1_ID}/dagRuns/{DAG1_RUN1_ID}", json={"state": state})
assert response.status_code == 200
assert listener.state == listener_state
| TestPatchDagRun |
python | getsentry__sentry | src/sentry/discover/arithmetic.py | {
"start": 1061,
"end": 2566
} | class ____:
__slots__ = "operator", "lhs", "rhs"
def __init__(
self,
operator: str,
lhs: OperandType | None = None,
rhs: OperandType | None = None,
) -> None:
self.operator = operator
self.lhs: OperandType | None = lhs
self.rhs: OperandType | None = rhs
self.validate()
def validate(self) -> None:
# This shouldn't really happen, but the operator value is based on the grammar so enforcing it to be safe
if self.operator not in SUPPORTED_OPERATORS:
raise ArithmeticParseError(f"{self.operator} is not a supported operator")
if self.operator == "divide" and self.rhs == 0:
raise ArithmeticValidationError("division by 0 is not allowed")
def to_snuba_json(self, alias: str | None = None) -> JsonQueryType:
"""Convert this tree of Operations to the equivalent snuba json"""
lhs = self.lhs.to_snuba_json() if isinstance(self.lhs, Operation) else self.lhs
# TODO(snql): This is a hack so the json syntax doesn't turn lhs into a function
if isinstance(lhs, str):
lhs = ["toFloat64", [lhs]]
rhs = self.rhs.to_snuba_json() if isinstance(self.rhs, Operation) else self.rhs
result: JsonQueryType = [self.operator, [lhs, rhs]]
if alias:
result.append(alias)
return result
def __repr__(self) -> str:
return repr([self.operator, self.lhs, self.rhs])
@dataclass(frozen=True)
| Operation |
python | openai__gym | gym/error.py | {
"start": 54,
"end": 125
} | class ____(Exception):
"""Error superclass."""
# Local errors
| Error |
python | pytorch__pytorch | test/test_autocast.py | {
"start": 7910,
"end": 10910
} | class ____(TestCase):
def test_cast_cache_is_global(self):
"""
Verifies that the autocast cache is global. This is done by
mocking out cache clearing at the end of the forward pass,
running forward+backward with an explicit call to autocast in the
backward, and verifying that the weight only get cast to float16 once.
"""
data = torch.randn(2, 3).cuda()
weight = torch.nn.Parameter(torch.randn(4, 3).cuda())
with WeightDTypeCastCounterMode(weight) as mode:
with torch.autocast(device_type="cuda"):
output = CustomLinear.apply(data, weight)
s = output.sum()
s.backward()
self.assertEqual(mode.dtype_cast_counter, 1)
def test_cache_disabled(self):
data = torch.randn(2, 3).cuda()
weight = torch.nn.Parameter(torch.randn(4, 3).cuda())
try:
torch._C._set_cached_tensors_enabled(True)
torch._C._add_cached_tensor(weight)
with WeightDTypeCastCounterMode(weight) as mode:
with torch.autocast(device_type="cuda"):
output = CustomLinear.apply(data, weight)
s = output.sum()
s.backward()
# we should not have cached the conversion of the weight
self.assertEqual(mode.dtype_cast_counter, 2)
finally:
torch._C._set_cached_tensors_enabled(False)
# index_put under AMP follows a cast policy called "promote",
# https://github.com/pytorch/pytorch/blob/4fcd15a667df5b80e81db6563d8d3123a0cbd051/aten/src/ATen/autocast_mode.h#L205-L230
# That means:
# (1) double precision is ignored,
# (2) if any argument is float, then all arguments are promoted to float,
# (3) if all arguments are of lower precision dtype, then all dtypes must be equal to the same amp autocast dtype.
# Since AMP autocast dtype is thread-local, it is not preserved across thread boundaries during autograd execution,
# and due to the multi-threaded nature of the autograd, the forward pass is being run in bfloat16, while the backward
# pass defaults to float16. The dtype mismatch leads to the error in the policy, as the criteria (3) is not satisfied.
# For more info see https://github.com/pytorch/pytorch/issues/132715.
def test_autocast_prioritize(self):
device = "cuda"
dtype = torch.bfloat16
with torch.autocast(device_type=device, enabled=True, dtype=dtype):
t = torch.randn([3, 4, 5], dtype=dtype, device=device, requires_grad=True)
index = torch.randint(
low=0, high=3, size=[3, 4, 5], dtype=torch.int64, device=device
)
val = torch.randn(1, dtype=dtype, device=device)
res = torch.index_put(t, [index], val)
loss = res.mean()
loss.backward()
@unittest.skipIf(not torch.backends.mps.is_available(), "requires mps")
| TestAutocastGPU |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_field_test.py | {
"start": 8077,
"end": 14184
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
({
'x': 1
}, "Missing required fields: {'y'}"),
({
'x': 1,
'y': 2.0,
'z': 3
}, "Got unexpected fields: {'z'}"),
])
def testConvertFieldsMismatch(self, field_values, error):
fields = [
extension_type_field.ExtensionTypeField('x', int),
extension_type_field.ExtensionTypeField('y', float)
]
with self.assertRaisesRegex(ValueError, error):
extension_type_field.convert_fields(fields, field_values)
@parameterized.parameters([
(12, int),
(5.3, float),
('foo', str),
(None, None),
(True, bool),
([1, 2, 3], tensor.Tensor),
(lambda: constant_op.constant([1, 2, 3]), tensor.Tensor),
(lambda: ragged_factory_ops.constant([[1, 2], [3]]),
ragged_tensor.RaggedTensor),
([1, 2, 3], typing.Tuple[int, ...], (1, 2, 3)),
((1, 2, 3), typing.Tuple[int, int, int], (1, 2, 3)),
([1, 2, 3], _TUPLE[int, ...], (1, 2, 3)),
((1, 2, 3), _TUPLE[int, int, int], (1, 2, 3)),
({
'a': 12
}, typing.Mapping[str, int]),
({
'a': (12, 3.0)
}, typing.Mapping[str, typing.Tuple[int, float]]),
({
'a': (12, 3.0)
}, typing.Mapping[str, _TUPLE[int, float]]),
(tensor_shape.TensorShape([1, 2]), tensor_shape.TensorShape,
tensor_shape.TensorShape([1, 2])),
([1, 2], tensor_shape.TensorShape, tensor_shape.TensorShape([1, 2])),
(dtypes.int32, dtypes.DType, dtypes.int32),
(np.int32, dtypes.DType, dtypes.int32),
])
def testConvertValue(self, value, value_type, expected=None):
if callable(value):
value = value() # deferred construction (contains tensor)
if expected is None:
expected = value
converted = extension_type_field._convert_value(value, value_type, ('x',))
if isinstance(converted, (tensor.Tensor, ragged_tensor.RaggedTensor)):
self.assertAllEqual(converted, expected)
else:
self.assertEqual(converted, expected)
@parameterized.parameters([
(12, int),
(5.3, float),
('foo', str),
(None, None),
(True, bool),
(tensor.TensorSpec([5]), tensor.Tensor),
(ragged_tensor.RaggedTensorSpec([5, None]), ragged_tensor.RaggedTensor),
([1, 2, 3], typing.Tuple[int, ...], (1, 2, 3)),
((1, 2, 3), typing.Tuple[int, int, int], (1, 2, 3)),
([1, 2, 3], _TUPLE[int, ...], (1, 2, 3)),
((1, 2, 3), _TUPLE[int, int, int], (1, 2, 3)),
({
'a': 12
}, typing.Mapping[str, int]),
({
'a': (12, 3.0)
}, typing.Mapping[str, typing.Tuple[int, float]]),
({
'a': (12, 3.0)
}, typing.Mapping[str, _TUPLE[int, float]]),
(tensor_shape.TensorShape([1, 2]), tensor_shape.TensorShape,
tensor_shape.TensorShape([1, 2])),
([1, 2], tensor_shape.TensorShape, tensor_shape.TensorShape([1, 2])),
(dtypes.int32, dtypes.DType, dtypes.int32),
(np.int32, dtypes.DType, dtypes.int32),
])
def testConvertValueForSpec(self, value, value_type, expected=None):
if callable(value):
value = value() # deferred construction (contains tensor)
if expected is None:
expected = value
converted = extension_type_field._convert_value(
value, value_type, ('x',),
extension_type_field._ConversionContext.SPEC)
if isinstance(converted, (tensor.Tensor, ragged_tensor.RaggedTensor)):
self.assertAllEqual(converted, expected)
else:
self.assertEqual(converted, expected)
@parameterized.parameters([
(12.3, int, "x: expected 'int', got 'float'"),
(12, float, "x: expected 'float', got 'int'"),
([1, 2, 3.0], typing.Tuple[int, ...],
r"x\[2\]: expected 'int', got 'float'"),
([1, 2, 3.0], _TUPLE[int, ...],
r"x\[2\]: expected 'int', got 'float'"),
('foo', tensor_shape.TensorShape,
"x: expected 'tf.TensorShape', got 'str'"),
('foo', dtypes.DType, "x: expected 'tf.DType', got 'str'"),
])
def testConvertValueError(self, value, value_type, error):
if callable(value):
value = value() # deferred construction (contains tensor)
with self.assertRaisesRegex(TypeError, error):
extension_type_field._convert_value(value, value_type, ('x',))
def testConvertFields(self):
fields = [
extension_type_field.ExtensionTypeField('x', int),
extension_type_field.ExtensionTypeField(
'y', typing.Tuple[typing.Union[int, bool], ...]),
extension_type_field.ExtensionTypeField(
'y', _TUPLE[typing.Union[int, bool], ...]),
extension_type_field.ExtensionTypeField('z', tensor.Tensor)
]
field_values = {'x': 1, 'y': [1, True, 3], 'z': [[1, 2], [3, 4], [5, 6]]}
extension_type_field.convert_fields(fields, field_values)
self.assertEqual(set(field_values), set(['x', 'y', 'z']))
self.assertEqual(field_values['x'], 1)
self.assertEqual(field_values['y'], (1, True, 3))
self.assertIsInstance(field_values['z'], tensor.Tensor)
self.assertAllEqual(field_values['z'], [[1, 2], [3, 4], [5, 6]])
def testConvertFieldsForSpec(self):
fields = [
extension_type_field.ExtensionTypeField('x', int),
extension_type_field.ExtensionTypeField(
'y', typing.Tuple[typing.Union[int, bool], ...]),
extension_type_field.ExtensionTypeField(
'y', _TUPLE[typing.Union[int, bool], ...]),
extension_type_field.ExtensionTypeField('z', tensor.Tensor)
]
field_values = {
'x': 1,
'y': [1, True, 3],
'z': tensor.TensorSpec([5, 3])
}
extension_type_field.convert_fields_for_spec(fields, field_values)
self.assertEqual(set(field_values), set(['x', 'y', 'z']))
self.assertEqual(field_values['x'], 1)
self.assertEqual(field_values['y'], (1, True, 3))
self.assertEqual(field_values['z'], tensor.TensorSpec([5, 3]))
if __name__ == '__main__':
googletest.main()
| FieldValueConverterTest |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_details.py | {
"start": 1886,
"end": 12279
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-details"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def test_simple(self) -> None:
response = self.get_success_response(self.project.organization.slug, self.project.slug)
assert response.data["id"] == str(self.project.id)
def test_superuser_simple(self) -> None:
superuser = self.create_user(is_superuser=True)
self.login_as(user=superuser, superuser=True)
response = self.get_success_response(self.project.organization.slug, self.project.slug)
assert response.data["id"] == str(self.project.id)
def test_staff_simple(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
response = self.get_success_response(self.project.organization.slug, self.project.slug)
assert response.data["id"] == str(self.project.id)
def test_numeric_org_slug(self) -> None:
# Regression test for https://github.com/getsentry/sentry/issues/2236
project = self.create_project(name="Bar", slug="bar", teams=[self.team])
# We want to make sure we don't hit the LegacyProjectRedirect view at all.
url = f"/api/0/projects/{self.organization.slug}/{project.slug}/"
response = self.client.get(url)
assert response.status_code == 200
assert response.data["id"] == str(project.id)
def test_with_stats(self) -> None:
self.create_group(project=self.project)
response = self.get_success_response(
self.project.organization.slug, self.project.slug, qs_params={"include": "stats"}
)
assert response.data["stats"]["unresolved"] == 1
def test_has_alert_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="msteams")
integration.add_organization(self.organization)
self.create_group(project=self.project)
response = self.get_success_response(
self.project.organization.slug,
self.project.slug,
qs_params={"expand": "hasAlertIntegration"},
)
assert response.data["hasAlertIntegrationInstalled"]
def test_no_alert_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="jira")
integration.add_organization(self.organization)
self.create_group(project=self.project)
response = self.get_success_response(
self.project.organization.slug,
self.project.slug,
qs_params={"expand": "hasAlertIntegration"},
)
assert not response.data["hasAlertIntegrationInstalled"]
def test_filters_disabled_plugins(self) -> None:
from sentry.plugins.base import plugins
self.create_group(project=self.project)
response = self.get_success_response(
self.project.organization.slug,
self.project.slug,
)
assert response.data["plugins"] == []
asana_plugin = plugins.get("asana")
asana_plugin.enable(self.project)
response = self.get_success_response(
self.project.organization.slug,
self.project.slug,
)
assert len(response.data["plugins"]) == 1
assert response.data["plugins"][0]["slug"] == asana_plugin.slug
def test_project_renamed_302(self) -> None:
# Rename the project
self.get_success_response(
self.project.organization.slug, self.project.slug, method="put", slug="foobar"
)
with outbox_runner():
response = self.get_success_response(
self.project.organization.slug, self.project.slug, status_code=302
)
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
AuditLogEntry.objects.get(
organization_id=self.project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).data.get("old_slug")
== self.project.slug
)
assert (
AuditLogEntry.objects.get(
organization_id=self.project.organization_id,
event=audit_log.get_event_id("PROJECT_EDIT"),
).data.get("new_slug")
== "foobar"
)
assert response.data["slug"] == "foobar"
assert (
response.data["detail"]["extra"]["url"]
== f"/api/0/projects/{self.project.organization.slug}/foobar/"
)
redirect_path = f"/api/0/projects/{self.project.organization.slug}/foobar/"
# XXX: AttributeError: 'Response' object has no attribute 'url'
# (this is with self.assertRedirects(response, ...))
assert response["Location"] == redirect_path
def test_non_org_rename_403(self) -> None:
org = self.create_organization()
team = self.create_team(organization=org, name="foo", slug="foo")
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=org, role="member", teams=[team])
other_org = self.create_organization()
other_project = self.create_project(organization=other_org)
ProjectRedirect.record(other_project, "old_slug")
self.login_as(user=user)
self.get_error_response(other_org.slug, "old_slug", status_code=403)
def test_highlight_preset(self) -> None:
assert self.project.get_option("sentry:highlight_context") is None
assert self.project.get_option("sentry:highlight_tags") is None
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
expected_preset = get_highlight_preset_for_project(self.project)
assert resp.data["highlightPreset"] == expected_preset
assert resp.data["highlightContext"] == expected_preset["context"]
assert resp.data["highlightTags"] == expected_preset["tags"]
def test_is_dynamically_sampled_pan_rate(self) -> None:
# test with feature flags disabled
with self.feature("organizations:dynamic-sampling"):
with mock.patch(
"sentry.dynamic_sampling.rules.base.quotas.backend.get_blended_sample_rate",
return_value=0.5,
):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert resp.data["isDynamicallySampled"]
with mock.patch(
"sentry.dynamic_sampling.rules.base.quotas.backend.get_blended_sample_rate",
return_value=1.0,
):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert not resp.data["isDynamicallySampled"]
with mock.patch(
"sentry.dynamic_sampling.rules.base.quotas.backend.get_blended_sample_rate",
return_value=None,
):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert not resp.data["isDynamicallySampled"]
def test_is_dynamically_sampled(self) -> None:
# test with feature flags disabled
with self.feature(
{
"organizations:dynamic-sampling": False,
"organizations:dynamic-sampling-custom": False,
}
):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert not resp.data["isDynamicallySampled"]
# test with sampling_mode = organization
self.project.organization.update_option(
"sentry:sampling_mode", DynamicSamplingMode.ORGANIZATION.value
)
# test not sampled organization
self.project.organization.update_option("sentry:target_sample_rate", 1.0)
with self.feature("organizations:dynamic-sampling-custom"):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert not resp.data["isDynamicallySampled"]
# test dynamically sampled organization
self.project.organization.update_option("sentry:target_sample_rate", 0.1)
with self.feature("organizations:dynamic-sampling-custom"):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert resp.data["isDynamicallySampled"]
# test with sampling_mode = project
self.project.organization.update_option(
"sentry:sampling_mode", DynamicSamplingMode.PROJECT.value
)
# test with not sampled project
self.project.update_option("sentry:target_sample_rate", 1.0)
with self.feature("organizations:dynamic-sampling-custom"):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert not resp.data["isDynamicallySampled"]
# test with sampled project
self.project.update_option("sentry:target_sample_rate", 0.1)
with self.feature("organizations:dynamic-sampling-custom"):
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert resp.data["isDynamicallySampled"]
def test_filter_options(self):
self.project.update_option("sentry:releases", ["1.*", "2.1.*"])
self.project.update_option(
"sentry:error_messages", ["TypeError*", "*: integer division by modulo or zero"]
)
self.project.update_option("sentry:log_messages", ["Updated*", "*.sentry.io"])
self.project.update_option("sentry:trace_metric_names", ["counter.*", "*.duration"])
resp = self.get_success_response(self.project.organization.slug, self.project.slug)
assert resp.data["options"]["filters:releases"] == "1.*\n2.1.*"
assert (
resp.data["options"]["filters:error_messages"]
== "TypeError*\n*: integer division by modulo or zero"
)
assert resp.data["options"]["filters:log_messages"] == "Updated*\n*.sentry.io"
assert resp.data["options"]["filters:trace_metric_names"] == "counter.*\n*.duration"
| ProjectDetailsTest |
python | falconry__falcon | examples/things_advanced.py | {
"start": 1026,
"end": 2134
} | class ____:
def process_request(self, req, resp):
token = req.get_header('Authorization')
account_id = req.get_header('Account-ID')
challenges = ['Token type="Fernet"']
if token is None:
description = 'Please provide an auth token as part of the request.'
raise falcon.HTTPUnauthorized(
title='Auth token required',
description=description,
challenges=challenges,
href='http://docs.example.com/auth',
)
if not self._token_is_valid(token, account_id):
description = (
'The provided auth token is not valid. '
'Please request a new token and try again.'
)
raise falcon.HTTPUnauthorized(
title='Authentication required',
description=description,
challenges=challenges,
href='http://docs.example.com/auth',
)
def _token_is_valid(self, token, account_id):
return True # Suuuuuure it's valid...
| AuthMiddleware |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 10992,
"end": 11495
} | class ____(OAuth2Error):
"""
The request requires higher privileges than provided by the
access token. The resource server SHOULD respond with the HTTP
403 (Forbidden) status code and MAY include the "scope"
attribute with the scope necessary to access the protected
resource.
"""
error = 'insufficient_scope'
status_code = 403
description = ("The request requires higher privileges than provided by "
"the access token.")
| InsufficientScopeError |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/service/server_lib.py | {
"start": 10942,
"end": 13567
} | class ____(
collections.namedtuple("WorkerConfig", [
"dispatcher_address", "worker_address", "port", "protocol",
"heartbeat_interval_ms", "dispatcher_timeout_ms",
"data_transfer_protocol", "data_transfer_address"
])):
"""Configuration class for tf.data service dispatchers.
Fields:
dispatcher_address: Specifies the address of the dispatcher.
worker_address: Specifies the address of the worker server. This address is
passed to the dispatcher so that the dispatcher can tell clients how to
connect to this worker.
port: Specifies the port to bind to. A value of 0 indicates that the worker
can bind to any available port.
protocol: A string indicating the protocol to be used by the worker to
connect to the dispatcher. E.g. "grpc".
heartbeat_interval_ms: How often the worker should heartbeat to the
dispatcher, in milliseconds. If not set, the runtime will select a
reasonable default. A higher value will reduce the load on the dispatcher,
while a lower value will reduce the time it takes to reclaim resources
from finished jobs.
dispatcher_timeout_ms: How long, in milliseconds, to retry requests to the
dispatcher before giving up and reporting an error. Defaults to 1 hour.
data_transfer_protocol: A string indicating the protocol to be used by the
worker to transfer data to the client. E.g. "grpc".
data_transfer_address: A string indicating the data transfer address of the
worker server.
"""
def __new__(cls,
dispatcher_address,
worker_address=None,
port=0,
protocol=None,
heartbeat_interval_ms=None,
dispatcher_timeout_ms=None,
data_transfer_protocol=None,
data_transfer_address=None):
if worker_address is None:
worker_address = "localhost:%port%"
if protocol is None:
protocol = _pywrap_utils_exp.TF_DATA_DefaultProtocol()
if data_transfer_address is None:
data_transfer_address = "localhost:%dts_port%"
heartbeat_interval_ms = _get_time_or_placeholder(heartbeat_interval_ms)
dispatcher_timeout_ms = _get_time_or_placeholder(dispatcher_timeout_ms)
return super(WorkerConfig,
cls).__new__(cls, dispatcher_address, worker_address, port,
protocol, heartbeat_interval_ms,
dispatcher_timeout_ms, data_transfer_protocol,
data_transfer_address)
@tf_export("data.experimental.service.WorkerServer", v1=[])
| WorkerConfig |
python | django-guardian__django-guardian | example_project_custom_group/articles/tests.py | {
"start": 3831,
"end": 7414
} | class ____(TestCase):
def setUp(self):
self.article = Article.objects.create(title="foo-title", slug="foo-slug", content="bar-content")
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user("joe", "joe@doe.com", "doe")
self.group = CustomGroup.objects.create(name="test-group")
self.user.groups.add(self.group)
self.client.login(username="joe", password="doe")
def test_list_permitted(self):
request = self.factory.get("/")
request.user = self.user
assign_perm("articles.view_article", self.group, self.article)
assign_perm("articles.delete_article", self.group, self.article)
view = ArticleListView.as_view()
response = view(request)
response.render()
self.assertContains(response, "foo-title")
def test_list_denied(self):
request = self.factory.get("/")
request.user = self.user
view = ArticleListView.as_view()
response = view(request)
response.render()
self.assertNotContains(response, "foo-title")
def test_create_permitted(self):
request = self.factory.get("/~create")
request.user = self.user
assign_perm("articles.add_article", self.group)
view = ArticleCreateView.as_view()
response = view(request)
self.assertEqual(response.status_code, 200)
def test_create_denied(self):
request = self.factory.get("/~create")
request.user = self.user
view = ArticleCreateView.as_view()
response = view(request)
self.assertEqual(response.status_code, 302)
def test_detail_permitted(self):
request = self.factory.get("/foo/")
request.user = self.user
assign_perm("articles.view_article", self.group, self.article)
view = ArticleDetailView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_detail_denied(self):
request = self.factory.get("/foo/")
request.user = self.user
view = ArticleDetailView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
def test_update_permitted(self):
request = self.factory.get("/")
request.user = self.user
assign_perm("articles.view_article", self.group, self.article)
assign_perm("articles.change_article", self.group, self.article)
view = ArticleUpdateView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_update_denied(self):
request = self.factory.get("/")
request.user = self.user
view = ArticleUpdateView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
def test_delete_permitted(self):
request = self.factory.get("/foo-slug/~delete")
request.user = self.user
assign_perm("articles.view_article", self.group, self.article)
assign_perm("articles.delete_article", self.group, self.article)
view = ArticleDeleteView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 200)
def test_delete_denied(self):
request = self.factory.get("/foo/~delete")
request.user = self.user
view = ArticleDeleteView.as_view()
response = view(request, slug="foo-slug")
self.assertEqual(response.status_code, 302)
| ViewGroupTestCase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 41120,
"end": 41309
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("IGNORED", "SUBSCRIBED", "UNSUBSCRIBED")
| SubscriptionState |
python | kamyu104__LeetCode-Solutions | Python/delete-n-nodes-after-m-nodes-of-a-linked-list.py | {
"start": 66,
"end": 182
} | class ____(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
| ListNode |
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 10803,
"end": 14554
} | class ____(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = config.attention_probs_dropout_prob
self.rotary_embeddings = None
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "rotary":
self.rotary_embeddings = EvollaSaProtRotaryEmbedding(dim=self.attention_head_size)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
self.scaling = 1.0
self.is_causal = self.is_decoder and not is_cross_attention
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
batch_size, seq_length = hidden_states.shape[:-1]
hidden_shape = (batch_size, seq_length, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
is_cross_attention = encoder_hidden_states is not None
current_states = encoder_hidden_states if is_cross_attention else hidden_states
attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
key_layer = self.key(current_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(current_states).view(hidden_shape).transpose(1, 2)
# Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
# EVOLLA_SA_PROT scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
# but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
# EVOLLA_SA_PROT code and fix rotary embeddings.
query_layer = query_layer * self.attention_head_size**-0.5
if self.position_embedding_type == "rotary":
query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, seq_length, -1).contiguous()
return attn_output, attn_weights
| EvollaSaProtSelfAttention |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_bar14.py | {
"start": 315,
"end": 2121
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet()
chartsheet1 = workbook.add_chartsheet()
chart1 = workbook.add_chart({"type": "bar"})
chart2 = workbook.add_chart({"type": "bar"})
chart3 = workbook.add_chart({"type": "column"})
chart1.axis_ids = [40294272, 40295808]
chart2.axis_ids = [40261504, 65749760]
chart3.axis_ids = [65465728, 66388352]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet2.default_url_format = None
worksheet2.write_column("A1", data[0])
worksheet2.write_column("B1", data[1])
worksheet2.write_column("C1", data[2])
worksheet2.write("A6", "http://www.perl.com/")
chart3.add_series({"values": "=Sheet2!$A$1:$A$5"})
chart3.add_series({"values": "=Sheet2!$B$1:$B$5"})
chart3.add_series({"values": "=Sheet2!$C$1:$C$5"})
chart1.add_series({"values": "=Sheet2!$A$1:$A$5"})
chart1.add_series({"values": "=Sheet2!$B$1:$B$5"})
chart1.add_series({"values": "=Sheet2!$C$1:$C$5"})
chart2.add_series({"values": "=Sheet2!$A$1:$A$5"})
worksheet2.insert_chart("E9", chart1)
worksheet2.insert_chart("F25", chart2)
chartsheet1.set_chart(chart3)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 907684,
"end": 908854
} | class ____(sgqlc.types.Type, RepositoryNode, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("comments", "commit", "path", "position", "pull_request")
comments = sgqlc.types.Field(
sgqlc.types.non_null(CommitCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
commit = sgqlc.types.Field(sgqlc.types.non_null(Commit), graphql_name="commit")
path = sgqlc.types.Field(String, graphql_name="path")
position = sgqlc.types.Field(Int, graphql_name="position")
pull_request = sgqlc.types.Field(
sgqlc.types.non_null(PullRequest), graphql_name="pullRequest"
)
| PullRequestCommitCommentThread |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 24838,
"end": 25356
} | class ____(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
| Pix2Sky_Molleweide |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 54275,
"end": 55295
} | class ____(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
| TimerTests |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_query.py | {
"start": 3562,
"end": 9022
} | class ____(fixtures.TablesTest):
__only_on__ = "mysql", "mariadb"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"cattable",
metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50)),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
Table(
"matchtable",
metadata,
Column("id", Integer, primary_key=True),
Column("title", String(200)),
Column("category_id", Integer, ForeignKey("cattable.id")),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
@classmethod
def insert_data(cls, connection):
cattable, matchtable = cls.tables("cattable", "matchtable")
connection.execute(
cattable.insert(),
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
],
)
connection.execute(
matchtable.insert(),
[
{
"id": 1,
"title": "Agile Web Development with Ruby On Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{
"id": 4,
"title": "The Definitive Guide to Django",
"category_id": 1,
},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
],
)
def test_simple_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
).fetchall()
eq_([2, 5], [r.id for r in results])
def test_not_match(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select()
.where(~matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
)
eq_([1, 3, 4], [r.id for r in results])
def test_simple_match_with_apostrophe(self, connection):
matchtable = self.tables.matchtable
results = connection.execute(
matchtable.select().where(matchtable.c.title.match("Matz's"))
).fetchall()
eq_([3], [r.id for r in results])
def test_return_value(self, connection):
matchtable = self.tables.matchtable
# test [ticket:3263]
result = connection.execute(
select(
matchtable.c.title.match("Agile Ruby Programming").label(
"ruby"
),
matchtable.c.title.match("Dive Python").label("python"),
matchtable.c.title,
).order_by(matchtable.c.id)
).fetchall()
eq_(
result,
[
(2.0, 0.0, "Agile Web Development with Ruby On Rails"),
(0.0, 2.0, "Dive Into Python"),
(2.0, 0.0, "Programming Matz's Ruby"),
(0.0, 0.0, "The Definitive Guide to Django"),
(0.0, 1.0, "Python in a Nutshell"),
],
)
def test_or_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select()
.where(
or_(
matchtable.c.title.match("nutshell"),
matchtable.c.title.match("ruby"),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select()
.where(matchtable.c.title.match("nutshell ruby"))
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results2])
def test_and_match(self, connection):
matchtable = self.tables.matchtable
results1 = connection.execute(
matchtable.select().where(
and_(
matchtable.c.title.match("python"),
matchtable.c.title.match("nutshell"),
)
)
).fetchall()
eq_([5], [r.id for r in results1])
results2 = connection.execute(
matchtable.select().where(
matchtable.c.title.match("+python +nutshell")
)
).fetchall()
eq_([5], [r.id for r in results2])
def test_match_across_joins(self, connection):
matchtable = self.tables.matchtable
cattable = self.tables.cattable
results = connection.execute(
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
or_(
cattable.c.description.match("Ruby"),
matchtable.c.title.match("nutshell"),
),
)
)
.order_by(matchtable.c.id)
).fetchall()
eq_([1, 3, 5], [r.id for r in results])
| MatchTest |
python | spack__spack | lib/spack/spack/util/spack_yaml.py | {
"start": 906,
"end": 1523
} | class ____(int):
pass
#: mapping from syaml type -> primitive type
syaml_types = {syaml_str: str, syaml_int: int, syaml_dict: dict, syaml_list: list}
markable_types = set(syaml_types) | {comments.CommentedSeq, comments.CommentedMap}
def syaml_type(obj):
"""Get the corresponding syaml wrapper type for a primitive type.
Return:
(object): syaml-typed copy of object, or the obj if no wrapper
"""
for syaml_t, t in syaml_types.items():
if type(obj) is not bool and isinstance(obj, t):
return syaml_t(obj) if type(obj) is not syaml_t else obj
return obj
| syaml_int |
python | apache__airflow | devel-common/src/docs/build_docs.py | {
"start": 6030,
"end": 6175
} | class ____(NamedTuple):
"""Specification of single build."""
package_name: str
is_autobuild: bool
verbose: bool
| BuildSpecification |
python | walkccc__LeetCode | solutions/1287. Element Appearing More Than 25% In Sorted Array/1287.py | {
"start": 0,
"end": 202
} | class ____:
def findSpecialInteger(self, arr: list[int]) -> int:
n = len(arr)
quarter = n // 4
for i in range(n - quarter):
if arr[i] == arr[i + quarter]:
return arr[i]
| Solution |
python | python__mypy | mypyc/ir/ops.py | {
"start": 2012,
"end": 4512
} | class ____:
"""IR basic block.
Contains a sequence of Ops and ends with a ControlOp (Goto,
Branch, Return or Unreachable). Only the last op can be a
ControlOp.
All generated Ops live in basic blocks. Basic blocks determine the
order of evaluation and control flow within a function. A basic
block is always associated with a single function/method (FuncIR).
When building the IR, ops that raise exceptions can be included in
the middle of a basic block, but the exceptions aren't checked.
Afterwards we perform a transform that inserts explicit checks for
all error conditions and splits basic blocks accordingly to preserve
the invariant that a jump, branch or return can only ever appear
as the final op in a block. Manually inserting error checking ops
would be boring and error-prone.
BasicBlocks have an error_handler attribute that determines where
to jump if an error occurs. If none is specified, an error will
propagate up out of the function. This is compiled away by the
`exceptions` module.
Block labels are used for pretty printing and emitting C code, and
get filled in by those passes.
Ops that may terminate the program aren't treated as exits.
"""
def __init__(self, label: int = -1) -> None:
self.label = label
self.ops: list[Op] = []
self.error_handler: BasicBlock | None = None
self.referenced = False
@property
def terminated(self) -> bool:
"""Does the block end with a jump, branch or return?
This should always be true after the basic block has been fully built, but
this is false during construction.
"""
return bool(self.ops) and isinstance(self.ops[-1], ControlOp)
@property
def terminator(self) -> ControlOp:
"""The terminator operation of the block."""
assert bool(self.ops) and isinstance(self.ops[-1], ControlOp)
return self.ops[-1]
# Never generates an exception
ERR_NEVER: Final = 0
# Generates magic value (c_error_value) based on target RType on exception
ERR_MAGIC: Final = 1
# Generates false (bool) on exception
ERR_FALSE: Final = 2
# Always fails
ERR_ALWAYS: Final = 3
# Like ERR_MAGIC, but the magic return overlaps with a possible return value, and
# an extra PyErr_Occurred() check is also required
ERR_MAGIC_OVERLAPPING: Final = 4
# Hack: using this line number for an op will suppress it in tracebacks
NO_TRACEBACK_LINE_NO = -10000
| BasicBlock |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 1200,
"end": 1279
} | class ____(ModelExtraB):
field3 = models.CharField(max_length=30)
| ModelExtraC |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 103918,
"end": 106372
} | class ____(BaseView):
@classmethod
def create(cls, x: IRNode, *, dim: Optional[int] = None) -> IRNode:
if is_storage_and_layout(x):
storage, old_layout = as_storage_and_layout(x)
new_size = []
new_stride = []
if dim is not None:
assert isinstance(dim, int), type(dim)
assert 0 <= dim and dim < len(old_layout.size)
for i, (size, stride) in enumerate(zip(old_layout.size, old_layout.stride)):
if dim is None:
# Only append if dim is not squeezed out
if not V.graph.sizevars.is_size_one_or_false(size):
new_size.append(size)
new_stride.append(stride)
else:
if i != dim:
new_size.append(size)
new_stride.append(stride)
else:
assert size == 1, "expected squeezed size to be 1"
new_layout = FixedLayout(
old_layout.device,
old_layout.dtype,
new_size,
new_stride,
old_layout.offset,
old_layout.is_pinned,
)
return ReinterpretView(data=storage, layout=new_layout)
if dim is None:
return View.create(
x,
[
s
for s in x.get_size()
if not V.graph.sizevars.is_size_one_or_false(s)
],
)
else:
assert x.get_size()[dim] == 1
return View.create(x, [s for i, s in enumerate(x.get_size()) if i != dim])
@staticmethod
def squeezer(
size: Sequence[Expr],
) -> tuple[list[int], Callable[[Sequence[Expr]], tuple[Expr]]]:
new_size = [s for s in size if s != 1]
not_one = [i for i, s in enumerate(size) if s != 1]
length = len(size)
def reindex(index: Sequence[Expr]) -> tuple[Expr]:
assert len(index) == len(not_one), f"{index} {not_one}"
new_index = [sympy.S.Zero] * length
for idx, s in zip(not_one, index):
new_index[idx] = s
return tuple(new_index)
return new_size, reindex
def __init__(self, data: Any) -> None:
raise AssertionError("use SqueezeView.create()")
@ir_dataclass
| SqueezeView |
python | redis__redis-py | redis/asyncio/multidb/healthcheck.py | {
"start": 602,
"end": 768
} | class ____(ABC):
@abstractmethod
async def check_health(self, database) -> bool:
"""Function to determine the health status."""
pass
| HealthCheck |
python | pypa__setuptools | setuptools/_distutils/compilers/C/tests/test_cygwin.py | {
"start": 496,
"end": 2701
} | class ____(support.TempdirManager):
def _get_config_h_filename(self):
return self.python_h
@pytest.mark.skipif('sys.platform != "cygwin"')
@pytest.mark.skipif('not os.path.exists("/usr/lib/libbash.dll.a")')
def test_find_library_file(self):
from distutils.cygwinccompiler import CygwinCCompiler
compiler = CygwinCCompiler()
link_name = "bash"
linkable_file = compiler.find_library_file(["/usr/lib"], link_name)
assert linkable_file is not None
assert os.path.exists(linkable_file)
assert linkable_file == f"/usr/lib/lib{link_name:s}.dll.a"
@pytest.mark.skipif('sys.platform != "cygwin"')
def test_runtime_library_dir_option(self):
from distutils.cygwinccompiler import CygwinCCompiler
compiler = CygwinCCompiler()
assert compiler.runtime_library_dir_option('/foo') == []
def test_check_config_h(self):
# check_config_h looks for "GCC" in sys.version first
# returns CONFIG_H_OK if found
sys.version = (
'2.6.1 (r261:67515, Dec 6 2008, 16:42:21) \n[GCC '
'4.0.1 (Apple Computer, Inc. build 5370)]'
)
assert cygwin.check_config_h()[0] == cygwin.CONFIG_H_OK
# then it tries to see if it can find "__GNUC__" in pyconfig.h
sys.version = 'something without the *CC word'
# if the file doesn't exist it returns CONFIG_H_UNCERTAIN
assert cygwin.check_config_h()[0] == cygwin.CONFIG_H_UNCERTAIN
# if it exists but does not contain __GNUC__, it returns CONFIG_H_NOTOK
self.write_file(self.python_h, 'xxx')
assert cygwin.check_config_h()[0] == cygwin.CONFIG_H_NOTOK
# and CONFIG_H_OK if __GNUC__ is found
self.write_file(self.python_h, 'xxx __GNUC__ xxx')
assert cygwin.check_config_h()[0] == cygwin.CONFIG_H_OK
def test_get_msvcr(self):
assert cygwin.get_msvcr() == []
@pytest.mark.skipif('sys.platform != "cygwin"')
def test_dll_libraries_not_none(self):
from distutils.cygwinccompiler import CygwinCCompiler
compiler = CygwinCCompiler()
assert compiler.dll_libraries is not None
| TestCygwinCCompiler |
python | google__jax | jax/_src/util.py | {
"start": 25709,
"end": 26329
} | class ____(abc.ABCMeta):
"""A variant of `abc.ABCMeta` which does not allow virtual subclasses.
Virtual subclasses support require `abc.ABCMeta` to roundtrip through
pure Python when doing instance/subclass checking. This if fine for ABCs
which need virtual subclasses, but is wasteful for the ones which don't.
"""
def register(cls, subclass):
del subclass # Unused.
raise NotImplementedError(f"{cls} does not support virtual subclasses")
__instancecheck__ = type.__instancecheck__ # type: ignore[assignment]
__subclasscheck__ = type.__subclasscheck__ # type: ignore[assignment]
| StrictABCMeta |
python | huggingface__transformers | src/transformers/models/sam2/modular_sam2.py | {
"start": 33692,
"end": 36498
} | class ____(SamPromptEncoder):
def __init__(self, config: Sam2PromptEncoderConfig):
nn.Module.__init__(self)
self.shared_embedding = Sam2PositionalEmbedding(config)
self.mask_embed = Sam2MaskEmbedding(config)
self.no_mask_embed = nn.Embedding(1, config.hidden_size)
self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size)
self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size)
self.input_image_size = config.image_size
self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size)
self.hidden_size = config.hidden_size
self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0)
labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1)
input_shape = (self.input_image_size, self.input_image_size)
point_embedding = self.shared_embedding(points, input_shape)
# torch.where and expanding the labels tensor is required by the ONNX export
point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
# This is required for the ONNX export. The dtype, device need to be explicitly
# specified as otherwise torch.onnx.export interprets as double
point_embedding = torch.where(
labels[..., None] != -10,
point_embedding,
torch.zeros_like(point_embedding),
)
# Add point embeddings for labels >= 0
point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.view(*boxes.shape[:2], 2, 2)
# add padding point for consistency with the original implementation
coords = torch.nn.functional.pad(coords, (0, 0, 0, 1), mode="constant", value=0)
corner_embedding = self.shared_embedding(coords, (self.input_image_size, self.input_image_size))
corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
corner_embedding[:, :, 2, :] = self.not_a_point_embed.weight.expand_as(corner_embedding[:, :, 2, :])
return corner_embedding
| Sam2PromptEncoder |
python | getsentry__sentry | tests/sentry/hybridcloud/models/test_outbox.py | {
"start": 5177,
"end": 10998
} | class ____(TransactionTestCase):
@patch("sentry.hybridcloud.models.outbox.process_region_outbox.send")
def test_draining_with_disabled_shards(self, mock_send: Mock) -> None:
outbox1 = Organization(id=1).outbox_for_update()
outbox2 = Organization(id=1).outbox_for_update()
outbox3 = Organization(id=2).outbox_for_update()
with outbox_context(flush=False):
outbox1.save()
outbox2.save()
outbox3.save()
with self.options({"hybrid_cloud.authentication.disabled_organization_shards": [1]}):
outbox1.drain_shard()
with pytest.raises(RegionOutbox.DoesNotExist):
outbox1.refresh_from_db()
outbox2.refresh_from_db() # still exists
assert mock_send.call_count == 0
outbox3.drain_shard()
with pytest.raises(RegionOutbox.DoesNotExist):
outbox3.refresh_from_db()
assert mock_send.call_count == 1
def test_drain_shard_not_flush_all__upper_bound(self) -> None:
outbox1 = Organization(id=1).outbox_for_update()
outbox2 = Organization(id=1).outbox_for_update()
with outbox_context(flush=False):
outbox1.save()
barrier: threading.Barrier = threading.Barrier(2, timeout=10)
processing_thread = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox1.drain_shard(_test_processing_barrier=barrier)
)
)
processing_thread.start()
barrier.wait()
# Does not include outboxes created after starting process.
with outbox_context(flush=False):
outbox2.save()
barrier.wait()
processing_thread.join(timeout=1)
assert not RegionOutbox.objects.filter(id=outbox1.id).first()
assert RegionOutbox.objects.filter(id=outbox2.id).first()
@patch("sentry.hybridcloud.models.outbox.process_region_outbox.send")
def test_drain_shard_not_flush_all__concurrent_processing(
self, mock_process_region_outbox: Mock
) -> None:
outbox1 = OrganizationMember(id=1, organization_id=3, user_id=1).outbox_for_update()
outbox2 = OrganizationMember(id=2, organization_id=3, user_id=2).outbox_for_update()
with outbox_context(flush=False):
outbox1.save()
outbox2.save()
barrier: threading.Barrier = threading.Barrier(2, timeout=1)
processing_thread_1 = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox1.drain_shard(_test_processing_barrier=barrier)
)
)
processing_thread_1.start()
# This concurrent process will block on, and not duplicate, the effort of the first thread.
processing_thread_2 = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox2.drain_shard(_test_processing_barrier=barrier)
)
)
barrier.wait()
processing_thread_2.start()
barrier.wait()
barrier.wait()
barrier.wait()
processing_thread_1.join()
processing_thread_2.join()
assert not RegionOutbox.objects.filter(id=outbox1.id).first()
assert not RegionOutbox.objects.filter(id=outbox2.id).first()
assert mock_process_region_outbox.call_count == 2
def test_drain_shard_flush_all__upper_bound(self) -> None:
outbox1 = Organization(id=1).outbox_for_update()
outbox2 = Organization(id=1).outbox_for_update()
with outbox_context(flush=False):
outbox1.save()
barrier: threading.Barrier = threading.Barrier(2, timeout=10)
processing_thread = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox1.drain_shard(flush_all=True, _test_processing_barrier=barrier)
)
)
processing_thread.start()
barrier.wait()
# Does include outboxes created after starting process.
with outbox_context(flush=False):
outbox2.save()
barrier.wait()
# Next iteration
barrier.wait()
barrier.wait()
processing_thread.join(timeout=1)
assert not RegionOutbox.objects.filter(id=outbox1.id).first()
assert not RegionOutbox.objects.filter(id=outbox2.id).first()
@patch("sentry.hybridcloud.models.outbox.process_region_outbox.send")
def test_drain_shard_flush_all__concurrent_processing__cooperation(
self, mock_process_region_outbox: Mock
) -> None:
outbox1 = OrganizationMember(id=1, organization_id=3, user_id=1).outbox_for_update()
outbox2 = OrganizationMember(id=2, organization_id=3, user_id=2).outbox_for_update()
with outbox_context(flush=False):
outbox1.save()
outbox2.save()
barrier: threading.Barrier = threading.Barrier(2, timeout=1)
processing_thread_1 = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox1.drain_shard(_test_processing_barrier=barrier)
)
)
processing_thread_1.start()
processing_thread_2 = threading.Thread(
target=wrap_with_connection_closure(
lambda: outbox2.drain_shard(flush_all=True, _test_processing_barrier=barrier)
)
)
barrier.wait()
processing_thread_2.start()
barrier.wait()
barrier.wait()
barrier.wait()
processing_thread_1.join()
processing_thread_2.join()
assert not RegionOutbox.objects.filter(id=outbox1.id).first()
assert not RegionOutbox.objects.filter(id=outbox2.id).first()
assert mock_process_region_outbox.call_count == 2
| OutboxDrainTest |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 9524,
"end": 9887
} | class ____(ValueError):
"""Raised when an error is encountered while a pickling library deserializes a pickle file."""
def __str__(self):
return (
"Error deserializing result. Note that result deserialization "
"is not supported across major Python versions. Cause: " + str(self.__cause__)
)
| DeserializingResultError |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 4524,
"end": 5516
} | class ____(RequestHandler, FacebookGraphMixin):
def initialize(self, test):
self._OAUTH_AUTHORIZE_URL = test.get_url("/facebook/server/authorize")
self._OAUTH_ACCESS_TOKEN_URL = test.get_url("/facebook/server/access_token")
self._FACEBOOK_BASE_URL = test.get_url("/facebook/server")
@gen.coroutine
def get(self):
if self.get_argument("code", None):
user = yield self.get_authenticated_user(
redirect_uri=self.request.full_url(),
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"),
)
self.write(user)
else:
self.authorize_redirect(
redirect_uri=self.request.full_url(),
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"},
)
| FacebookClientLoginHandler |
python | django__django | tests/migrations/test_autodetector.py | {
"start": 204279,
"end": 209180
} | class ____(SimpleTestCase):
def test_no_operations(self):
class Migration(migrations.Migration):
operations = []
migration = Migration("some_migration", "test_app")
self.assertIs(migration.suggest_name().startswith("auto_"), True)
def test_no_operations_initial(self):
class Migration(migrations.Migration):
initial = True
operations = []
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_single_operation(self):
class Migration(migrations.Migration):
operations = [migrations.CreateModel("Person", fields=[])]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "person")
class Migration(migrations.Migration):
operations = [migrations.DeleteModel("Person")]
migration = Migration("0002_initial", "test_app")
self.assertEqual(migration.suggest_name(), "delete_person")
def test_single_operation_long_name(self):
class Migration(migrations.Migration):
operations = [migrations.CreateModel("A" * 53, fields=[])]
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "a" * 53)
def test_two_operations(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.DeleteModel("Animal"),
]
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "person_delete_animal")
def test_two_create_models(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.CreateModel("Animal", fields=[]),
]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "person_animal")
def test_two_create_models_with_initial_true(self):
class Migration(migrations.Migration):
initial = True
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.CreateModel("Animal", fields=[]),
]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_many_operations_suffix(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person1", fields=[]),
migrations.CreateModel("Person2", fields=[]),
migrations.CreateModel("Person3", fields=[]),
migrations.DeleteModel("Person4"),
migrations.DeleteModel("Person5"),
]
migration = Migration("some_migration", "test_app")
self.assertEqual(
migration.suggest_name(),
"person1_person2_person3_delete_person4_and_more",
)
def test_operation_with_no_suggested_name(self):
class Migration(migrations.Migration):
operations = [
migrations.CreateModel("Person", fields=[]),
migrations.RunSQL("SELECT 1 FROM person;"),
]
migration = Migration("some_migration", "test_app")
self.assertIs(migration.suggest_name().startswith("auto_"), True)
def test_operation_with_invalid_chars_in_suggested_name(self):
class Migration(migrations.Migration):
operations = [
migrations.AddConstraint(
"Person",
models.UniqueConstraint(
fields=["name"], name="person.name-*~unique!"
),
),
]
migration = Migration("some_migration", "test_app")
self.assertEqual(migration.suggest_name(), "person_person_name_unique_")
def test_none_name(self):
class Migration(migrations.Migration):
operations = [migrations.RunSQL("SELECT 1 FROM person;")]
migration = Migration("0001_initial", "test_app")
suggest_name = migration.suggest_name()
self.assertIs(suggest_name.startswith("auto_"), True)
def test_none_name_with_initial_true(self):
class Migration(migrations.Migration):
initial = True
operations = [migrations.RunSQL("SELECT 1 FROM person;")]
migration = Migration("0001_initial", "test_app")
self.assertEqual(migration.suggest_name(), "initial")
def test_auto(self):
migration = migrations.Migration("0001_initial", "test_app")
suggest_name = migration.suggest_name()
self.assertIs(suggest_name.startswith("auto_"), True)
| MigrationSuggestNameTests |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 268366,
"end": 268805
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "column", "deleted_card_id")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
column = sgqlc.types.Field("ProjectColumn", graphql_name="column")
deleted_card_id = sgqlc.types.Field(ID, graphql_name="deletedCardId")
| DeleteProjectCardPayload |
python | django__django | tests/i18n/tests.py | {
"start": 80622,
"end": 81287
} | class ____(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get("/fr/streaming/")
self.assertContains(response, "Oui/Non")
response = self.client.get("/en/streaming/")
self.assertContains(response, "Yes/No")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("de", "German"),
("fr", "French"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls_default_unprefixed",
LANGUAGE_CODE="en",
)
| LocaleMiddlewareTests |
python | pydata__xarray | xarray/tests/test_namedarray.py | {
"start": 1737,
"end": 3783
} | class ____(
CustomArrayBase[_ShapeType_co, _DType_co],
ExplicitlyIndexed,
Generic[_ShapeType_co, _DType_co],
):
def __getitem__(
self, key: _IndexKeyLike | CustomArrayIndexable[Any, Any], /
) -> CustomArrayIndexable[Any, _DType_co]:
if isinstance(key, CustomArrayIndexable):
if isinstance(key.array, type(self.array)):
# TODO: key.array is duckarray here, can it be narrowed down further?
# an _arrayapi cannot be used on a _arrayfunction for example.
return type(self)(array=self.array[key.array]) # type: ignore[index]
else:
raise TypeError("key must have the same array type as self")
else:
return type(self)(array=self.array[key])
def __array_namespace__(self) -> ModuleType:
return np
def check_duck_array_typevar(a: duckarray[Any, _DType]) -> duckarray[Any, _DType]:
# Mypy checks a is valid:
b: duckarray[Any, _DType] = a
# Runtime check if valid:
if isinstance(b, _arrayfunction_or_api):
return b
else:
missing_attrs = ""
actual_attrs = set(dir(b))
for t in _arrayfunction_or_api:
if sys.version_info >= (3, 13):
# https://github.com/python/cpython/issues/104873
from typing import get_protocol_members
expected_attrs = get_protocol_members(t)
elif sys.version_info >= (3, 12):
expected_attrs = t.__protocol_attrs__
else:
from typing import _get_protocol_attrs # type: ignore[attr-defined]
expected_attrs = _get_protocol_attrs(t)
missing_attrs_ = expected_attrs - actual_attrs
if missing_attrs_:
missing_attrs += f"{t.__name__} - {missing_attrs_}\n"
raise TypeError(
f"a ({type(a)}) is not a valid _arrayfunction or _arrayapi. "
"Missing following attrs:\n"
f"{missing_attrs}"
)
| CustomArrayIndexable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.