language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-poetry__poetry | src/poetry/layouts/layout.py | {
"start": 1359,
"end": 7340
} | class ____:
def __init__(
self,
project: str,
version: str = "0.1.0",
description: str = "",
readme_format: str = "md",
author: str | None = None,
license: str | None = None,
python: str | None = None,
dependencies: Mapping[str, str | Mapping[str, Any]] | None = None,
dev_dependencies: Mapping[str, str | Mapping[str, Any]] | None = None,
) -> None:
self._project = canonicalize_name(project)
self._package_path_relative = Path(
*(module_name(part) for part in project.split("."))
)
self._package_name = ".".join(self._package_path_relative.parts)
self._version = version
self._description = description
self._readme_format = readme_format.lower()
self._license = license
self._python = python
self._dependencies = dependencies or {}
self._dev_dependencies = dev_dependencies or {}
if not author:
author = "Your Name <you@example.com>"
self._author = author
@property
def basedir(self) -> Path:
return Path()
@property
def package_path(self) -> Path:
return self.basedir / self._package_path_relative
def get_package_include(self) -> InlineTable | None:
package = inline_table()
# If a project is created in the root directory (this is reasonable inside a
# docker container, eg <https://github.com/python-poetry/poetry/issues/5103>)
# then parts will be empty.
parts = self._package_path_relative.parts
if not parts:
return None
include = parts[0]
package.append("include", include)
if self.basedir != Path():
package.append("from", self.basedir.as_posix())
else:
if module_name(self._project) == include:
# package include and package name are the same,
# packages table is redundant here.
return None
return package
def create(
self, path: Path, with_tests: bool = True, with_pyproject: bool = True
) -> None:
path.mkdir(parents=True, exist_ok=True)
self._create_default(path)
self._create_readme(path)
if with_tests:
self._create_tests(path)
if with_pyproject:
self._write_poetry(path)
def generate_project_content(self) -> TOMLDocument:
template = POETRY_DEFAULT
content: dict[str, Any] = loads(template)
project_content = content["project"]
project_content["name"] = self._project
project_content["version"] = self._version
project_content["description"] = self._description
m = AUTHOR_REGEX.match(self._author)
if m is None:
# This should not happen because author has been validated before.
raise ValueError(f"Invalid author: {self._author}")
else:
author = {"name": m.group("name")}
if email := m.group("email"):
author["email"] = email
project_content["authors"].append(author)
if self._license:
project_content["license"]["text"] = self._license
else:
project_content.remove("license")
project_content["readme"] = f"README.{self._readme_format}"
if self._python:
project_content["requires-python"] = self._python
else:
project_content.remove("requires-python")
for dep_name, dep_constraint in self._dependencies.items():
dependency = Factory.create_dependency(dep_name, dep_constraint)
project_content["dependencies"].append(dependency.to_pep_508())
poetry_content = content["tool"]["poetry"]
packages = self.get_package_include()
if packages:
poetry_content["packages"].append(packages)
else:
poetry_content.remove("packages")
if self._dev_dependencies:
for dep_name, dep_constraint in self._dev_dependencies.items():
dependency = Factory.create_dependency(dep_name, dep_constraint)
content["dependency-groups"]["dev"].append(dependency.to_pep_508())
else:
del content["dependency-groups"]
if not poetry_content:
del content["tool"]["poetry"]
# Add build system
build_system = table()
build_system_version = ""
if BUILD_SYSTEM_MIN_VERSION is not None:
build_system_version = ">=" + BUILD_SYSTEM_MIN_VERSION
if BUILD_SYSTEM_MAX_VERSION is not None:
if build_system_version:
build_system_version += ","
build_system_version += "<" + BUILD_SYSTEM_MAX_VERSION
build_system.add("requires", ["poetry-core" + build_system_version])
build_system.add("build-backend", "poetry.core.masonry.api")
assert isinstance(content, TOMLDocument)
content.add("build-system", build_system)
return content
def _create_default(self, path: Path, src: bool = True) -> None:
package_path = path / self.package_path
package_path.mkdir(parents=True)
package_init = package_path / "__init__.py"
package_init.touch()
def _create_readme(self, path: Path) -> Path:
readme_file = path.joinpath(f"README.{self._readme_format}")
readme_file.touch()
return readme_file
@staticmethod
def _create_tests(path: Path) -> None:
tests = path / "tests"
tests.mkdir()
tests_init = tests / "__init__.py"
tests_init.touch(exist_ok=False)
def _write_poetry(self, path: Path) -> None:
pyproject = PyProjectTOML(path / "pyproject.toml")
content = self.generate_project_content()
for section, item in content.items():
pyproject.data.append(section, item)
pyproject.save()
| Layout |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context_creation_job.py | {
"start": 10173,
"end": 13721
} | class ____(ExecutionContextManager[PlanOrchestrationContext]):
def __init__(
self,
context_event_generator: Callable[
...,
Iterator[Union[DagsterEvent, PlanOrchestrationContext]],
],
job: IJob,
execution_plan: ExecutionPlan,
run_config: Mapping[str, object],
dagster_run: DagsterRun,
instance: DagsterInstance,
raise_on_error: Optional[bool] = False,
output_capture: Optional[dict["StepOutputHandle", Any]] = None,
executor_defs: Optional[Sequence[ExecutorDefinition]] = None,
resume_from_failure=False,
):
event_generator = context_event_generator(
job,
execution_plan,
run_config,
dagster_run,
instance,
raise_on_error,
executor_defs,
output_capture,
resume_from_failure=resume_from_failure,
)
super().__init__(event_generator)
@property
def context_type(self) -> type[PlanOrchestrationContext]:
return PlanOrchestrationContext
def orchestration_context_event_generator(
job: IJob,
execution_plan: ExecutionPlan,
run_config: Mapping[str, object],
dagster_run: DagsterRun,
instance: DagsterInstance,
raise_on_error: bool,
executor_defs: Optional[Sequence[ExecutorDefinition]],
output_capture: Optional[dict["StepOutputHandle", Any]],
resume_from_failure: bool = False,
) -> Iterator[Union[DagsterEvent, PlanOrchestrationContext]]:
check.invariant(executor_defs is None)
context_creation_data = create_context_creation_data(
job,
execution_plan,
run_config,
dagster_run,
instance,
)
log_manager = create_log_manager(context_creation_data)
try:
executor = create_executor(context_creation_data)
execution_context = PlanOrchestrationContext(
plan_data=create_plan_data(
context_creation_data,
raise_on_error,
executor.retries,
executor.step_dependency_config,
),
log_manager=log_manager,
executor=executor,
output_capture=output_capture,
resume_from_failure=resume_from_failure,
)
_validate_plan_with_context(execution_context, execution_plan)
yield execution_context
except DagsterError as dagster_error:
dagster_error = cast("DagsterUserCodeExecutionError", dagster_error)
user_facing_exc_info = (
# pylint does not know original_exc_info exists is is_user_code_error is true
dagster_error.original_exc_info if dagster_error.is_user_code_error else sys.exc_info()
)
error_info = serializable_error_info_from_exc_info(user_facing_exc_info)
event = DagsterEvent.job_failure(
job_context_or_name=dagster_run.job_name,
context_msg=(
"Failure during initialization for job"
f' "{dagster_run.job_name}". This may be due to a failure in initializing the'
" executor or one of the loggers."
),
failure_reason=RunFailureReason.JOB_INITIALIZATION_FAILURE,
error_info=error_info,
)
log_manager.log_dagster_event(
level=logging.ERROR, msg=event.message or "", dagster_event=event
)
yield event
if raise_on_error:
raise dagster_error
| PlanOrchestrationContextManager |
python | huggingface__transformers | src/transformers/models/olmo2/modeling_olmo2.py | {
"start": 13606,
"end": 15384
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Olmo2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
self.mlp = Olmo2MLP(config)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| Olmo2DecoderLayer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 467078,
"end": 467575
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AddProjectV2DraftIssue"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_item")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_item = sgqlc.types.Field("ProjectV2Item", graphql_name="projectItem")
"""The draft issue added to the project."""
| AddProjectV2DraftIssuePayload |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1124855,
"end": 1125297
} | class ____(sgqlc.types.Type, RepositoryNode):
"""A Dependabot Update for a dependency in a repository"""
__schema__ = github_schema
__field_names__ = ("error", "pull_request")
error = sgqlc.types.Field(DependabotUpdateError, graphql_name="error")
"""The error from a dependency update"""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The associated pull request"""
| DependabotUpdate |
python | ray-project__ray | rllib/utils/framework.py | {
"start": 7056,
"end": 7249
} | class ____:
def __init__(self) -> None:
self.keras = _KerasStub()
def __bool__(self):
# if tf should return False
return False
# Fake module for tf.keras.
| _TFStub |
python | spyder-ide__spyder | spyder/plugins/debugger/widgets/framesbrowser.py | {
"start": 13061,
"end": 14581
} | class ____(QStyledItemDelegate):
def __init__(self, parent):
QStyledItemDelegate.__init__(self, parent)
self._margin = None
def paint(self, painter, option, index):
"""Paint the item."""
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
style = (QApplication.style() if options.widget is None
else options.widget.style())
doc = QTextDocument()
text = options.text
doc.setHtml(text)
doc.setDocumentMargin(0)
# This needs to be an empty string to avoid overlapping the
# normal text of the QTreeWidgetItem
options.text = ""
style.drawControl(QStyle.CE_ItemViewItem, options, painter)
ctx = QAbstractTextDocumentLayout.PaintContext()
textRect = style.subElementRect(QStyle.SE_ItemViewItemText,
options, None)
painter.save()
painter.translate(textRect.topLeft())
painter.setClipRect(textRect.translated(-textRect.topLeft()))
doc.documentLayout().draw(painter, ctx)
painter.restore()
def sizeHint(self, option, index):
"""Get a size hint."""
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
doc = QTextDocument()
doc.setHtml(options.text)
doc.setTextWidth(options.rect.width())
size = QSize(int(doc.idealWidth()), int(doc.size().height()))
return size
| ItemDelegate |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_sensor.py | {
"start": 2316,
"end": 2660
} | class ____(BaseSensorOperator):
def __init__(self, return_value=False, xcom_value=None, **kwargs):
super().__init__(**kwargs)
self.xcom_value = xcom_value
self.return_value = return_value
def poke(self, context: Context):
return PokeReturnValue(self.return_value, self.xcom_value)
| DummySensorWithXcomValue |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial002_py310.py | {
"start": 544,
"end": 3125
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
teams: list[Team] = Relationship(back_populates="heroes", link_model=HeroTeamLink)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond",
secret_name="Dive Wilson",
teams=[team_z_force, team_preventers],
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
teams=[team_preventers],
)
hero_spider_boy = Hero(
name="Spider-Boy", secret_name="Pedro Parqueador", teams=[team_preventers]
)
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Deadpond:", hero_deadpond)
print("Deadpond teams:", hero_deadpond.teams)
print("Rusty-Man:", hero_rusty_man)
print("Rusty-Man Teams:", hero_rusty_man.teams)
print("Spider-Boy:", hero_spider_boy)
print("Spider-Boy Teams:", hero_spider_boy.teams)
def update_heroes():
with Session(engine) as session:
hero_spider_boy = session.exec(
select(Hero).where(Hero.name == "Spider-Boy")
).one()
team_z_force = session.exec(select(Team).where(Team.name == "Z-Force")).one()
team_z_force.heroes.append(hero_spider_boy)
session.add(team_z_force)
session.commit()
print("Updated Spider-Boy's Teams:", hero_spider_boy.teams)
print("Z-Force heroes:", team_z_force.heroes)
hero_spider_boy.teams.remove(team_z_force)
session.add(team_z_force)
session.commit()
print("Reverted Z-Force's heroes:", team_z_force.heroes)
print("Reverted Spider-Boy's teams:", hero_spider_boy.teams)
def main():
create_db_and_tables()
create_heroes()
update_heroes()
if __name__ == "__main__":
main()
| Hero |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/bitstring.py | {
"start": 410,
"end": 10603
} | class ____(str):
"""Represent a PostgreSQL bit string in python.
This object is used by the :class:`_postgresql.BIT` type when returning
values. :class:`_postgresql.BitString` values may also be constructed
directly and used with :class:`_postgresql.BIT` columns::
from sqlalchemy.dialects.postgresql import BitString
with engine.connect() as conn:
conn.execute(table.insert(), {"data": BitString("011001101")})
.. versionadded:: 2.1
"""
_DIGITS = frozenset("01")
def __new__(cls, _value: str, _check: bool = True) -> BitString:
if isinstance(_value, BitString):
return _value
elif _check and cls._DIGITS.union(_value) > cls._DIGITS:
raise ValueError("BitString must only contain '0' and '1' chars")
else:
return super().__new__(cls, _value)
@classmethod
def from_int(cls, value: int, length: int) -> BitString:
"""Returns a BitString consisting of the bits in the integer ``value``.
A ``ValueError`` is raised if ``value`` is not a non-negative integer.
If the provided ``value`` can not be represented in a bit string
of at most ``length``, a ``ValueError`` will be raised. The bitstring
will be padded on the left by ``'0'`` to bits to produce a
bitstring of the desired length.
"""
if value < 0:
raise ValueError("value must be non-negative")
if length < 0:
raise ValueError("length must be non-negative")
template_str = f"{{0:0{length}b}}" if length > 0 else ""
r = template_str.format(value)
if (length == 0 and value > 0) or len(r) > length:
raise ValueError(
f"Cannot encode {value} as a BitString of length {length}"
)
return cls(r)
@classmethod
def from_bytes(cls, value: bytes, length: int = -1) -> BitString:
"""Returns a ``BitString`` consisting of the bits in the given
``value`` bytes.
If ``length`` is provided, then the length of the provided string
will be exactly ``length``, with ``'0'`` bits inserted at the left of
the string in order to produce a value of the required length.
If the bits obtained by omitting the leading ``'0'`` bits of ``value``
cannot be represented in a string of this length a ``ValueError``
will be raised.
"""
str_v: str = "".join(f"{int(c):08b}" for c in value)
if length >= 0:
str_v = str_v.lstrip("0")
if len(str_v) > length:
raise ValueError(
f"Cannot encode {value!r} as a BitString of "
f"length {length}"
)
str_v = str_v.zfill(length)
return cls(str_v)
def get_bit(self, index: int) -> Literal["0", "1"]:
"""Returns the value of the flag at the given
index::
BitString("0101").get_flag(4) == "1"
"""
return cast(Literal["0", "1"], super().__getitem__(index))
@property
def bit_length(self) -> int:
return len(self)
@property
def octet_length(self) -> int:
return math.ceil(len(self) / 8)
def has_bit(self, index: int) -> bool:
return self.get_bit(index) == "1"
def set_bit(
self, index: int, value: bool | int | Literal["0", "1"]
) -> BitString:
"""Set the bit at index to the given value.
If value is an int, then it is considered to be '1' iff nonzero.
"""
if index < 0 or index >= len(self):
raise IndexError("BitString index out of range")
if isinstance(value, (bool, int)):
value = "1" if value else "0"
if self.get_bit(index) == value:
return self
return BitString(
"".join([self[:index], value, self[index + 1 :]]), False
)
def lstrip(self, char: str | None = None) -> BitString:
"""Returns a copy of the BitString with leading characters removed.
If omitted or None, 'chars' defaults '0'::
BitString("00010101000").lstrip() == BitString("00010101")
BitString("11110101111").lstrip("1") == BitString("1111010")
"""
if char is None:
char = "0"
return BitString(super().lstrip(char), False)
def rstrip(self, char: str | None = "0") -> BitString:
"""Returns a copy of the BitString with trailing characters removed.
If omitted or None, ``'char'`` defaults to "0"::
BitString("00010101000").rstrip() == BitString("10101000")
BitString("11110101111").rstrip("1") == BitString("10101111")
"""
if char is None:
char = "0"
return BitString(super().rstrip(char), False)
def strip(self, char: str | None = "0") -> BitString:
"""Returns a copy of the BitString with both leading and trailing
characters removed.
If omitted or None, ``'char'`` defaults to ``"0"``::
BitString("00010101000").rstrip() == BitString("10101")
BitString("11110101111").rstrip("1") == BitString("1010")
"""
if char is None:
char = "0"
return BitString(super().strip(char))
def removeprefix(self, prefix: str, /) -> BitString:
return BitString(super().removeprefix(prefix), False)
def removesuffix(self, suffix: str, /) -> BitString:
return BitString(super().removesuffix(suffix), False)
def replace(
self,
old: str,
new: str,
count: SupportsIndex = -1,
) -> BitString:
new = BitString(new)
return BitString(super().replace(old, new, count), False)
def split(
self,
sep: str | None = None,
maxsplit: SupportsIndex = -1,
) -> list[str]:
return [BitString(word) for word in super().split(sep, maxsplit)]
def zfill(self, width: SupportsIndex) -> BitString:
return BitString(super().zfill(width), False)
def __repr__(self) -> str:
return f'BitString("{self.__str__()}")'
def __int__(self) -> int:
return int(self, 2) if self else 0
def to_bytes(self, length: int = -1) -> bytes:
return int(self).to_bytes(
length if length >= 0 else self.octet_length, byteorder="big"
)
def __bytes__(self) -> bytes:
return self.to_bytes()
def __getitem__(
self, key: SupportsIndex | slice[Any, Any, Any]
) -> BitString:
return BitString(super().__getitem__(key), False)
def __add__(self, o: str) -> BitString:
"""Return self + o"""
if not isinstance(o, str):
raise TypeError(
f"Can only concatenate str (not '{type(self)}') to BitString"
)
return BitString("".join([self, o]))
def __radd__(self, o: str) -> BitString:
if not isinstance(o, str):
raise TypeError(
f"Can only concatenate str (not '{type(self)}') to BitString"
)
return BitString("".join([o, self]))
def __lshift__(self, amount: int) -> BitString:
"""Shifts each the bitstring to the left by the given amount.
String length is preserved::
BitString("000101") << 1 == BitString("001010")
"""
return BitString(
"".join([self, *("0" for _ in range(amount))])[-len(self) :], False
)
def __rshift__(self, amount: int) -> BitString:
"""Shifts each bit in the bitstring to the right by the given amount.
String length is preserved::
BitString("101") >> 1 == BitString("010")
"""
return BitString(self[:-amount], False).zfill(width=len(self))
def __invert__(self) -> BitString:
"""Inverts (~) each bit in the
bitstring::
~BitString("01010") == BitString("10101")
"""
return BitString("".join("1" if x == "0" else "0" for x in self))
def __and__(self, o: str) -> BitString:
"""Performs a bitwise and (``&``) with the given operand.
A ``ValueError`` is raised if the operand is not the same length.
e.g.::
BitString("011") & BitString("011") == BitString("010")
"""
if not isinstance(o, str):
return NotImplemented
o = BitString(o)
if len(self) != len(o):
raise ValueError("Operands must be the same length")
return BitString(
"".join(
"1" if (x == "1" and y == "1") else "0"
for x, y in zip(self, o)
),
False,
)
def __or__(self, o: str) -> BitString:
"""Performs a bitwise or (``|``) with the given operand.
A ``ValueError`` is raised if the operand is not the same length.
e.g.::
BitString("011") | BitString("010") == BitString("011")
"""
if not isinstance(o, str):
return NotImplemented
if len(self) != len(o):
raise ValueError("Operands must be the same length")
o = BitString(o)
return BitString(
"".join(
"1" if (x == "1" or y == "1") else "0"
for (x, y) in zip(self, o)
),
False,
)
def __xor__(self, o: str) -> BitString:
"""Performs a bitwise xor (``^``) with the given operand.
A ``ValueError`` is raised if the operand is not the same length.
e.g.::
BitString("011") ^ BitString("010") == BitString("001")
"""
if not isinstance(o, BitString):
return NotImplemented
if len(self) != len(o):
raise ValueError("Operands must be the same length")
return BitString(
"".join(
(
"1"
if ((x == "1" and y == "0") or (x == "0" and y == "1"))
else "0"
)
for (x, y) in zip(self, o)
),
False,
)
__rand__ = __and__
__ror__ = __or__
__rxor__ = __xor__
| BitString |
python | lepture__mistune | src/mistune/directives/_rst.py | {
"start": 486,
"end": 1025
} | class ____(DirectiveParser):
name = "rst_directive"
@staticmethod
def parse_type(m: Match[str]) -> str:
return m.group("type")
@staticmethod
def parse_title(m: Match[str]) -> str:
return m.group("title")
@staticmethod
def parse_content(m: Match[str]) -> str:
full_content = m.group(0)
text = m.group("text")
pretext = full_content[: -len(text)]
leading = len(m.group(1)) + 2
return "\n".join(line[leading:] for line in text.splitlines()) + "\n"
| RSTParser |
python | getsentry__sentry | tests/sentry/event_manager/test_event_manager_grouping.py | {
"start": 14885,
"end": 23684
} | class ____(TestCase):
"""
Tests for a bug where error events were interpreted as default-type events and therefore all
came out with a placeholder title.
"""
def test_fixes_broken_title_data(self) -> None:
# An event before the bug was introduced
event1 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Dogs are great!"}],
},
# Use a fingerprint to guarantee all events end up in the same group
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None
group = Group.objects.get(id=event1.group_id)
assert group.title == event1.title == "DogsAreNeverAnError: Dogs are great!"
assert group.data["title"] == event1.data["title"] == "DogsAreNeverAnError: Dogs are great!"
assert group.data["metadata"].get("title") is event1.data["metadata"].get("title") is None
assert group.message == "Dogs are great! DogsAreNeverAnError"
# Simulate the bug
with mock.patch(
"sentry.event_manager.get_event_type",
return_value=DefaultEvent(),
):
# Neutralize the data fixes by making them unable to recognize a bad title and by
# unconditionally using the incoming title
with (
mock.patch(
"sentry.event_manager._is_placeholder_title",
return_value=False,
),
mock.patch(
"sentry.event_manager._get_updated_group_title",
new=lambda existing_container, incoming_container: incoming_container.get(
"title"
),
),
):
event2 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Maisey is silly"}],
},
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None and event2.group_id is not None
assert event2.group_id == event1.group_id
# Pull the group again to get updated data
group = Group.objects.get(id=event2.group_id)
# As expected, without the fixes, the bug screws up both the event and group data. (Compare
# this to the next test, where the fixes are left in place, and the group remains untouched.)
assert group.title == event2.title == "<unlabeled event>"
assert group.data["title"] == event2.data["title"] == "<unlabeled event>"
assert (
group.data["metadata"]["title"]
== event2.data["metadata"]["title"]
== "<unlabeled event>"
)
assert group.message == "<unlabeled event>"
# Now that we have a group with bad data, return to the current world - where the bug has
# been fixed and the data fix is also in place - and we can see that the group's data
# returns to what it should be
event3 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Charlie is goofy"}],
},
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None
assert event2.group_id is not None
assert event3.group_id is not None
assert event3.group_id == event2.group_id == event1.group_id
# Pull the group again to get updated data
group = Group.objects.get(id=event3.group_id)
# Title data is updated with values from newest event, and is back to the structure it was
# before the bug
assert group.title == event3.title == "DogsAreNeverAnError: Charlie is goofy"
assert (
group.data["title"] == event3.data["title"] == "DogsAreNeverAnError: Charlie is goofy"
)
assert group.data["metadata"].get("title") is event3.data["metadata"].get("title") is None
assert group.message == "Charlie is goofy DogsAreNeverAnError"
# This is the same as the data-fixing test above, except that the fix is left in place when
# the bug happens, and so the bad titles never get saved on the group
def test_bug_regression_no_longer_breaks_titles(self) -> None:
# An event before the bug was introduced
event1 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Dogs are great!"}],
},
# Use a fingerprint to guarantee all events end up in the same group
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None
group = Group.objects.get(id=event1.group_id)
assert group.title == event1.title == "DogsAreNeverAnError: Dogs are great!"
assert group.data["title"] == event1.data["title"] == "DogsAreNeverAnError: Dogs are great!"
assert group.data["metadata"].get("title") is event1.data["metadata"].get("title") is None
assert group.message == "Dogs are great! DogsAreNeverAnError"
# Simulate the bug, but with the fix in place
with mock.patch(
"sentry.event_manager.get_event_type",
return_value=DefaultEvent(),
):
event2 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Maisey is silly"}],
},
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None and event2.group_id is not None
assert event2.group_id == event1.group_id
# Pull the group again to get updated data
group = Group.objects.get(id=event2.group_id)
# The event may be messed up, but it didn't mess up the group
assert event2.title == "<unlabeled event>"
assert group.title == "DogsAreNeverAnError: Dogs are great!"
assert event2.data["title"] == "<unlabeled event>"
assert group.data["title"] == "DogsAreNeverAnError: Dogs are great!"
assert group.data["metadata"].get("title") is None
assert event2.data["metadata"]["title"] == "<unlabeled event>"
assert group.message == "Dogs are great! DogsAreNeverAnError"
# An event after the bug was fixed
event3 = save_new_event(
{
"exception": {
"values": [{"type": "DogsAreNeverAnError", "value": "Charlie is goofy"}],
},
"fingerprint": ["adopt don't shop"],
},
self.project,
)
assert event1.group_id is not None
assert event2.group_id is not None
assert event3.group_id is not None
assert event3.group_id == event2.group_id == event1.group_id
# Pull the group again to get updated data
group = Group.objects.get(id=event3.group_id)
# Title data is updated with values from newest event
assert group.title == event3.title == "DogsAreNeverAnError: Charlie is goofy"
assert (
group.data["title"] == event3.data["title"] == "DogsAreNeverAnError: Charlie is goofy"
)
assert group.data["metadata"].get("title") is event3.data["metadata"].get("title") is None
assert group.message == "Charlie is goofy DogsAreNeverAnError"
@django_db_all
@pytest.mark.parametrize(
["existing_title", "incoming_title", "expected_title"],
[
("Dogs are great!", "Adopt don't shop", "Adopt don't shop"),
("Dogs are great!", "<untitled>", "Dogs are great!"),
("Dogs are great!", None, "Dogs are great!"),
("<unlabeled event>", "Adopt don't shop", "Adopt don't shop"),
("<unlabeled event>", "<untitled>", "<untitled>"),
("<unlabeled event>", None, None),
(None, "Adopt don't shop", "Adopt don't shop"),
(None, "<untitled>", None),
(None, None, None),
],
)
def test_get_updated_group_title(existing_title, incoming_title, expected_title) -> None:
existing_data = {"title": existing_title} if existing_title is not None else {}
incoming_data = {"title": incoming_title} if incoming_title is not None else {}
assert _get_updated_group_title(existing_data, incoming_data) == expected_title
| PlaceholderTitleTest |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/metrics_utils.py | {
"start": 9812,
"end": 36030
} | class ____(Enum):
"""Type of AUC summation method.
https://en.wikipedia.org/wiki/Riemann_sum)
Contains the following values:
* 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
`PR` curve, interpolates (true/false) positives but not the ratio that is
precision (see Davis & Goadrich 2006 for details).
* 'minoring': Applies left summation for increasing intervals and right
summation for decreasing intervals.
* 'majoring': Applies right summation for increasing intervals and left
summation for decreasing intervals.
"""
INTERPOLATION = 'interpolation'
MAJORING = 'majoring'
MINORING = 'minoring'
@staticmethod
def from_str(key):
if key in ('interpolation', 'Interpolation'):
return AUCSummationMethod.INTERPOLATION
elif key in ('majoring', 'Majoring'):
return AUCSummationMethod.MAJORING
elif key in ('minoring', 'Minoring'):
return AUCSummationMethod.MINORING
else:
raise ValueError('Invalid AUC summation method value "%s".' % key)
def _update_confusion_matrix_variables_optimized(
variables_to_update,
y_true,
y_pred,
thresholds,
multi_label=False,
sample_weights=None,
label_weights=None,
thresholds_with_epsilon=False):
"""Update confusion matrix variables with memory efficient alternative.
Note that the thresholds need to be evenly distributed within the list, eg,
the diff between consecutive elements are the same.
To compute TP/FP/TN/FN, we are measuring a binary classifier
C(t) = (predictions >= t)
at each threshold 't'. So we have
TP(t) = sum( C(t) * true_labels )
FP(t) = sum( C(t) * false_labels )
But, computing C(t) requires computation for each t. To make it fast,
observe that C(t) is a cumulative integral, and so if we have
thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
where n = num_thresholds, and if we can compute the bucket function
B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
then we get
C(t_i) = sum( B(j), j >= i )
which is the reversed cumulative sum in tf.cumsum().
We can compute B(i) efficiently by taking advantage of the fact that
our thresholds are evenly distributed, in that
width = 1.0 / (num_thresholds - 1)
thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
Given a prediction value p, we can map it to its bucket by
bucket_index(p) = floor( p * (num_thresholds - 1) )
so we can use tf.math.unsorted_segment_sum() to update the buckets in one
pass.
Consider following example:
y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.5, 0.3, 0.9]
thresholds = [0.0, 0.5, 1.0]
num_buckets = 2 # [0.0, 1.0], (1.0, 2.0]
bucket_index(y_pred) = tf.math.floor(y_pred * num_buckets)
= tf.math.floor([0.2, 1.0, 0.6, 1.8])
= [0, 0, 0, 1]
# The meaning of this bucket is that if any of the label is true,
# then 1 will be added to the corresponding bucket with the index.
# Eg, if the label for 0.2 is true, then 1 will be added to bucket 0. If the
# label for 1.8 is true, then 1 will be added to bucket 1.
#
# Note the second item "1.0" is floored to 0, since the value need to be
# strictly larger than the bucket lower bound.
# In the implementation, we use tf.math.ceil() - 1 to achieve this.
tp_bucket_value = tf.math.unsorted_segment_sum(true_labels, bucket_indices,
num_segments=num_thresholds)
= [1, 1, 0]
# For [1, 1, 0] here, it means there is 1 true value contributed by bucket 0,
# and 1 value contributed by bucket 1. When we aggregate them to together,
# the result become [a + b + c, b + c, c], since large thresholds will always
# contribute to the value for smaller thresholds.
true_positive = tf.math.cumsum(tp_bucket_value, reverse=True)
= [2, 1, 0]
This implementation exhibits a run time and space complexity of O(T + N),
where T is the number of thresholds and N is the size of predictions.
Metrics that rely on standard implementation instead exhibit a complexity of
O(T * N).
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be cast
to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
the range `[0, 1]`.
thresholds: A sorted floating point `Tensor` with value in `[0, 1]`. It need
to be evenly distributed (the diff between each element need to be the
same).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or flattened
into a single label. When True, the values of `variables_to_update` must
have a second dimension equal to the number of labels in y_true and
y_pred, and those tensors must not be RaggedTensors.
sample_weights: Optional `Tensor` whose rank is either 0, or the same rank
as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions
must be either `1`, or the same as the corresponding `y_true` dimension).
label_weights: Optional tensor of non-negative weights for multilabel data.
The weights are applied when calculating TP, FP, FN, and TN without
explicit multilabel handling (i.e. when the data is to be flattened).
thresholds_with_epsilon: Optional boolean indicating whether the leading and
tailing thresholds has any epsilon added for floating point imprecisions.
It will change how we handle the leading and tailing bucket.
Returns:
Update op.
"""
num_thresholds = thresholds.shape.as_list()[0]
if sample_weights is None:
sample_weights = 1.0
else:
sample_weights = weights_broadcast_ops.broadcast_weights(
math_ops.cast(sample_weights, dtype=y_pred.dtype), y_pred)
if not multi_label:
sample_weights = array_ops.reshape(sample_weights, [-1])
if label_weights is None:
label_weights = 1.0
else:
label_weights = array_ops.expand_dims(label_weights, 0)
label_weights = weights_broadcast_ops.broadcast_weights(label_weights,
y_pred)
if not multi_label:
label_weights = array_ops.reshape(label_weights, [-1])
weights = math_ops.multiply(sample_weights, label_weights)
# We shouldn't need this, but in case there are predict value that is out of
# the range of [0.0, 1.0]
y_pred = clip_ops.clip_by_value(y_pred,
clip_value_min=0.0, clip_value_max=1.0)
y_true = math_ops.cast(math_ops.cast(y_true, dtypes.bool), y_true.dtype)
if not multi_label:
y_true = array_ops.reshape(y_true, [-1])
y_pred = array_ops.reshape(y_pred, [-1])
true_labels = math_ops.multiply(y_true, weights)
false_labels = math_ops.multiply((1.0 - y_true), weights)
# Compute the bucket indices for each prediction value.
# Since the predict value has to be strictly greater than the thresholds,
# eg, buckets like [0, 0.5], (0.5, 1], and 0.5 belongs to first bucket.
# We have to use math.ceil(val) - 1 for the bucket.
bucket_indices = math_ops.ceil(y_pred * (num_thresholds - 1)) - 1
if thresholds_with_epsilon:
# In this case, the first bucket should actually take into account since
# the any prediction between [0.0, 1.0] should be larger than the first
# threshold. We change the bucket value from -1 to 0.
bucket_indices = nn_ops.relu(bucket_indices)
bucket_indices = math_ops.cast(bucket_indices, dtypes.int32)
if multi_label:
# We need to run bucket segment sum for each of the label class. In the
# multi_label case, the rank of the label is 2. We first transpose it so
# that the label dim becomes the first and we can parallel run though them.
true_labels = array_ops.transpose_v2(true_labels)
false_labels = array_ops.transpose_v2(false_labels)
bucket_indices = array_ops.transpose_v2(bucket_indices)
def gather_bucket(label_and_bucket_index):
label, bucket_index = label_and_bucket_index[0], label_and_bucket_index[1]
return math_ops.unsorted_segment_sum(
data=label, segment_ids=bucket_index, num_segments=num_thresholds)
tp_bucket_v = parallel_control_flow_ops.vectorized_map(
gather_bucket, (true_labels, bucket_indices))
fp_bucket_v = parallel_control_flow_ops.vectorized_map(
gather_bucket, (false_labels, bucket_indices))
tp = array_ops.transpose_v2(
math_ops.cumsum(tp_bucket_v, reverse=True, axis=1))
fp = array_ops.transpose_v2(
math_ops.cumsum(fp_bucket_v, reverse=True, axis=1))
else:
tp_bucket_v = math_ops.unsorted_segment_sum(
data=true_labels, segment_ids=bucket_indices,
num_segments=num_thresholds)
fp_bucket_v = math_ops.unsorted_segment_sum(
data=false_labels, segment_ids=bucket_indices,
num_segments=num_thresholds)
tp = math_ops.cumsum(tp_bucket_v, reverse=True)
fp = math_ops.cumsum(fp_bucket_v, reverse=True)
# fn = sum(true_labels) - tp
# tn = sum(false_labels) - fp
if (ConfusionMatrix.TRUE_NEGATIVES in variables_to_update or
ConfusionMatrix.FALSE_NEGATIVES in variables_to_update):
if multi_label:
total_true_labels = math_ops.reduce_sum(true_labels, axis=1)
total_false_labels = math_ops.reduce_sum(false_labels, axis=1)
else:
total_true_labels = math_ops.reduce_sum(true_labels)
total_false_labels = math_ops.reduce_sum(false_labels)
update_ops = []
if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
update_ops.append(variable.assign_add(tp))
if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
update_ops.append(variable.assign_add(fp))
if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
tn = total_false_labels - fp
update_ops.append(variable.assign_add(tn))
if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
fn = total_true_labels - tp
update_ops.append(variable.assign_add(fn))
return control_flow_ops.group(update_ops)
def is_evenly_distributed_thresholds(thresholds):
"""Check if the thresholds list is evenly distributed.
We could leverage evenly distributed thresholds to use less memory when
calculate metrcis like AUC where each individual threshold need to be
evaluated.
Args:
thresholds: A python list or tuple, or 1D numpy array whose value is ranged
in [0, 1].
Returns:
boolean, whether the values in the inputs are evenly distributed.
"""
# Check the list value and see if it is evenly distributed.
num_thresholds = len(thresholds)
if num_thresholds < 3:
return False
even_thresholds = np.arange(num_thresholds,
dtype=np.float32) / (num_thresholds - 1)
return np.allclose(thresholds, even_thresholds, atol=backend.epsilon())
def update_confusion_matrix_variables(variables_to_update,
y_true,
y_pred,
thresholds,
top_k=None,
class_id=None,
sample_weight=None,
multi_label=False,
label_weights=None,
thresholds_distributed_evenly=False):
"""Returns op to update the given confusion matrix variables.
For every pair of values in y_true and y_pred:
true_positive: y_true == True and y_pred > thresholds
false_negatives: y_true == True and y_pred <= thresholds
true_negatives: y_true == False and y_pred <= thresholds
false_positive: y_true == False and y_pred > thresholds
The results will be weighted and added together. When multiple thresholds are
provided, we will repeat the same for every threshold.
For estimation of these metrics over a stream of data, the function creates an
`update_op` operation that updates the given variables.
If `sample_weight` is `None`, weights default to 1.
Use weights of 0 to mask values.
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
the range `[0, 1]`.
thresholds: A float value, float tensor, python list, or tuple of float
thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
top_k: Optional int, indicates that the positive labels should be limited to
the top k predictions.
class_id: Optional int, limits the prediction and labels to the class
specified by this argument.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
be either `1`, or the same as the corresponding `y_true` dimension).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or flattened
into a single label. When True, the values of `variables_to_update` must
have a second dimension equal to the number of labels in y_true and
y_pred, and those tensors must not be RaggedTensors.
label_weights: (optional) tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN without
explicit multilabel handling (i.e. when the data is to be flattened).
thresholds_distributed_evenly: Boolean, whether the thresholds are evenly
distributed within the list. An optimized method will be used if this is
the case. See _update_confusion_matrix_variables_optimized() for more
details.
Returns:
Update op.
Raises:
ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
`sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
`variables_to_update` contains invalid keys.
"""
if multi_label and label_weights is not None:
raise ValueError('`label_weights` for multilabel data should be handled '
'outside of `update_confusion_matrix_variables` when '
'`multi_label` is True.')
if variables_to_update is None:
return
if not any(
key for key in variables_to_update if key in list(ConfusionMatrix)):
raise ValueError(
'Please provide at least one valid confusion matrix '
'variable to update. Valid variable key options are: "{}". '
'Received: "{}"'.format(
list(ConfusionMatrix), variables_to_update.keys()))
variable_dtype = list(variables_to_update.values())[0].dtype
y_true = math_ops.cast(y_true, dtype=variable_dtype)
y_pred = math_ops.cast(y_pred, dtype=variable_dtype)
if thresholds_distributed_evenly:
# Check whether the thresholds has any leading or tailing epsilon added
# for floating point imprecision. The leading and tailing threshold will be
# handled bit differently as the corner case.
# At this point, thresholds should be a list/array with more than 2 items,
# and ranged between [0, 1]. See is_evenly_distributed_thresholds() for more
# details.
thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
thresholds = tensor_conversion.convert_to_tensor_v2_with_dispatch(
thresholds, dtype=variable_dtype
)
num_thresholds = thresholds.shape.as_list()[0]
if multi_label:
one_thresh = math_ops.equal(
math_ops.cast(1, dtype=dtypes.int32),
array_ops.rank(thresholds),
name='one_set_of_thresholds_cond')
else:
[y_pred,
y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
sample_weight)
one_thresh = math_ops.cast(True, dtype=dtypes.bool)
invalid_keys = [
key for key in variables_to_update if key not in list(ConfusionMatrix)
]
if invalid_keys:
raise ValueError(
'Invalid keys: {}. Valid variable key options are: "{}"'.format(
invalid_keys, list(ConfusionMatrix)))
with ops.control_dependencies([
check_ops.assert_greater_equal(
y_pred,
math_ops.cast(0.0, dtype=y_pred.dtype),
message='predictions must be >= 0'),
check_ops.assert_less_equal(
y_pred,
math_ops.cast(1.0, dtype=y_pred.dtype),
message='predictions must be <= 1')
]):
if sample_weight is None:
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
else:
sample_weight = math_ops.cast(sample_weight, dtype=variable_dtype)
y_pred, y_true, sample_weight = (
losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true, sample_weight=sample_weight))
y_pred.shape.assert_is_compatible_with(y_true.shape)
if top_k is not None:
y_pred = _filter_top_k(y_pred, top_k)
if class_id is not None:
y_true = y_true[..., class_id]
y_pred = y_pred[..., class_id]
if thresholds_distributed_evenly and compat.forward_compatible(2021, 6, 8):
# The new approach will take effect after 2021/6/8, to give enough time
# for Brella release to pick up the new op tf.math.cumsum with float32.
return _update_confusion_matrix_variables_optimized(
variables_to_update, y_true, y_pred, thresholds,
multi_label=multi_label, sample_weights=sample_weight,
label_weights=label_weights,
thresholds_with_epsilon=thresholds_with_epsilon)
pred_shape = array_ops.shape(y_pred)
num_predictions = pred_shape[0]
if y_pred.shape.ndims == 1:
num_labels = 1
else:
num_labels = gen_math_ops.Prod(input=pred_shape[1:], axis=0)
thresh_label_tile = array_ops.where_v2(one_thresh, num_labels,
array_ops.ones([], dtype=dtypes.int32))
# Reshape predictions and labels, adding a dim for thresholding.
if multi_label:
predictions_extra_dim = array_ops.expand_dims(y_pred, 0)
labels_extra_dim = array_ops.expand_dims(
math_ops.cast(y_true, dtype=dtypes.bool), 0)
else:
# Flatten predictions and labels when not multilabel.
predictions_extra_dim = array_ops.reshape(y_pred, [1, -1])
labels_extra_dim = array_ops.reshape(
math_ops.cast(y_true, dtype=dtypes.bool), [1, -1])
# Tile the thresholds for every prediction.
if multi_label:
thresh_pretile_shape = [num_thresholds, 1, -1]
thresh_tiles = [1, num_predictions, thresh_label_tile]
data_tiles = [num_thresholds, 1, 1]
else:
thresh_pretile_shape = [num_thresholds, -1]
thresh_tiles = [1, num_predictions * num_labels]
data_tiles = [num_thresholds, 1]
thresh_tiled = array_ops.tile(
array_ops.reshape(thresholds, thresh_pretile_shape),
array_ops_stack.stack(thresh_tiles))
# Tile the predictions for every threshold.
preds_tiled = array_ops.tile(predictions_extra_dim, data_tiles)
# Compare predictions and threshold.
pred_is_pos = math_ops.greater(preds_tiled, thresh_tiled)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_extra_dim, data_tiles)
if sample_weight is not None:
sample_weight = weights_broadcast_ops.broadcast_weights(
math_ops.cast(sample_weight, dtype=variable_dtype), y_pred)
weights_tiled = array_ops.tile(
array_ops.reshape(sample_weight, thresh_tiles), data_tiles)
else:
weights_tiled = None
if label_weights is not None and not multi_label:
label_weights = array_ops.expand_dims(label_weights, 0)
label_weights = weights_broadcast_ops.broadcast_weights(label_weights,
y_pred)
label_weights_tiled = array_ops.tile(
array_ops.reshape(label_weights, thresh_tiles), data_tiles)
if weights_tiled is None:
weights_tiled = label_weights_tiled
else:
weights_tiled = math_ops.multiply(weights_tiled, label_weights_tiled)
update_ops = []
def weighted_assign_add(label, pred, weights, var):
label_and_pred = math_ops.cast(
math_ops.logical_and(label, pred), dtype=var.dtype)
if weights is not None:
label_and_pred *= math_ops.cast(weights, dtype=var.dtype)
return var.assign_add(math_ops.reduce_sum(label_and_pred, 1))
loop_vars = {
ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
}
update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
if update_fn or update_tn:
pred_is_neg = math_ops.logical_not(pred_is_pos)
loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
if update_fp or update_tn:
label_is_neg = math_ops.logical_not(label_is_pos)
loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
if update_tn:
loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)
for matrix_cond, (label, pred) in loop_vars.items():
if matrix_cond in variables_to_update:
update_ops.append(
weighted_assign_add(label, pred, weights_tiled,
variables_to_update[matrix_cond]))
return control_flow_ops.group(update_ops)
def _filter_top_k(x, k):
"""Filters top-k values in the last dim of x and set the rest to NEG_INF.
Used for computing top-k prediction values in dense labels (which has the same
shape as predictions) for recall and precision top-k metrics.
Args:
x: tensor with any dimensions.
k: the number of values to keep.
Returns:
tensor with same shape and dtype as x.
"""
_, top_k_idx = nn_ops.top_k(x, k, sorted=False)
top_k_mask = math_ops.reduce_sum(
array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)
return x * top_k_mask + NEG_INF * (1 - top_k_mask)
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
"""If ragged, it checks the compatibility and then returns the flat_values.
Note: If two tensors are dense, it does not check their compatibility.
Note: Although two ragged tensors with different ragged ranks could have
identical overall rank and dimension sizes and hence be compatible,
we do not support those cases.
Args:
values: A list of potentially ragged tensor of the same ragged_rank.
mask: A potentially ragged tensor of the same ragged_rank as elements in
Values.
Returns:
A tuple in which the first element is the list of tensors and the second
is the mask tensor. ([Values], mask). Mask and the element in Values
are equal to the flat_values of the input arguments (if they were ragged).
"""
if isinstance(values, list):
is_all_ragged = \
all(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
is_any_ragged = \
any(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
else:
is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor)
is_any_ragged = is_all_ragged
if (is_all_ragged and
((mask is None) or isinstance(mask, ragged_tensor.RaggedTensor))):
to_be_stripped = False
if not isinstance(values, list):
values = [values]
to_be_stripped = True
# NOTE: we leave the flat_values compatibility to
# tf.TensorShape `assert_is_compatible_with`
# check if both dynamic dimensions are equal and then use the flat_values.
nested_row_split_list = [rt.nested_row_splits for rt in values]
assertion_list = _assert_splits_match(nested_row_split_list)
# if both are ragged sample_weights also should be ragged with same dims.
if isinstance(mask, ragged_tensor.RaggedTensor):
assertion_list_for_mask = _assert_splits_match(
[nested_row_split_list[0], mask.nested_row_splits])
with ops.control_dependencies(assertion_list_for_mask):
mask = array_ops.expand_dims(mask.flat_values, -1)
# values has at least 1 element.
flat_values = []
for value in values:
with ops.control_dependencies(assertion_list):
flat_values.append(array_ops.expand_dims(value.flat_values, -1))
values = flat_values[0] if to_be_stripped else flat_values
elif is_any_ragged:
raise TypeError('One of the inputs does not have acceptable types.')
# values are empty or value are not ragged and mask is ragged.
elif isinstance(mask, ragged_tensor.RaggedTensor):
raise TypeError('Ragged mask is not allowed with non-ragged inputs.')
return values, mask
def _assert_splits_match(nested_splits_lists):
"""Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
"""
error_msg = 'Inputs must have identical ragged splits'
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [
check_ops.assert_equal(s1, s2, message=error_msg) # pylint: disable=g-complex-comprehension
for splits_list in nested_splits_lists[1:]
for (s1, s2) in zip(nested_splits_lists[0], splits_list)
]
| AUCSummationMethod |
python | spack__spack | lib/spack/spack/util/package_hash.py | {
"start": 855,
"end": 1669
} | class ____(ast.NodeTransformer):
"""Transformer that removes docstrings from a Python AST.
This removes *all* strings that aren't on the RHS of an assignment statement from
the body of functions, classes, and modules -- even if they're not directly after
the declaration.
"""
def remove_docstring(self, node):
if node.body:
node.body = [child for child in node.body if not unused_string(child)]
if not node.body:
node.body = [ast.Pass()]
self.generic_visit(node)
return node
def visit_FunctionDef(self, node):
return self.remove_docstring(node)
def visit_ClassDef(self, node):
return self.remove_docstring(node)
def visit_Module(self, node):
return self.remove_docstring(node)
| RemoveDocstrings |
python | huggingface__transformers | tests/models/sam_hq/test_modeling_sam_hq.py | {
"start": 9673,
"end": 10725
} | class ____:
def __init__(
self,
hidden_size=32,
input_image_size=24,
patch_size=2,
mask_input_channels=4,
num_point_embeddings=4,
hidden_act="gelu",
):
self.hidden_size = hidden_size
self.input_image_size = input_image_size
self.patch_size = patch_size
self.mask_input_channels = mask_input_channels
self.num_point_embeddings = num_point_embeddings
self.hidden_act = hidden_act
def get_config(self):
return SamHQPromptEncoderConfig(
image_size=self.input_image_size,
patch_size=self.patch_size,
mask_input_channels=self.mask_input_channels,
hidden_size=self.hidden_size,
num_point_embeddings=self.num_point_embeddings,
hidden_act=self.hidden_act,
)
def prepare_config_and_inputs(self):
dummy_points = floats_tensor([self.batch_size, 3, 2])
config = self.get_config()
return config, dummy_points
| SamHQPromptEncoderTester |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_image/generate/sync.py | {
"start": 314,
"end": 461
} | class ____(
Generic[Properties, References],
_NearImageGenerateExecutor[ConnectionSync, Properties, References],
):
pass
| _NearImageGenerate |
python | dask__distributed | distributed/comm/tcp.py | {
"start": 15770,
"end": 17091
} | class ____:
def _check_encryption(self, address, connection_args):
if not self.encrypted and connection_args.get("require_encryption"):
# XXX Should we have a dedicated SecurityError class?
raise RuntimeError(
"encryption required by Dask configuration, "
"refusing communication from/to %r" % (self.prefix + address,)
)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | socket.AI_NUMERICSERV
async def _getaddrinfo(host, port, *, family, type=socket.SOCK_STREAM):
# If host and port are numeric, then getaddrinfo doesn't block and we
# can skip get_running_loop().getaddrinfo which is implemented by
# running in a ThreadPoolExecutor. So we try first with the
# _NUMERIC_ONLY flags set, and then only use the threadpool if that
# fails with EAI_NONAME:
try:
return socket.getaddrinfo(
host,
port,
family=family,
type=type,
flags=_NUMERIC_ONLY,
)
except socket.gaierror as e:
if e.errno != socket.EAI_NONAME:
raise
# That failed; it's a real hostname. We better use a thread.
return await asyncio.get_running_loop().getaddrinfo(
host, port, family=family, type=socket.SOCK_STREAM
)
| RequireEncryptionMixin |
python | zarr-developers__zarr-python | tests/package_with_entrypoint/__init__.py | {
"start": 1924,
"end": 2847
} | class ____(Bool):
"""
This is a "data type" that serializes to "test"
"""
_zarr_v3_name: ClassVar[Literal["test"]] = "test" # type: ignore[assignment]
@classmethod
def from_json(cls, data: DTypeJSON, *, zarr_format: Literal[2, 3]) -> Self:
if zarr_format == 2 and data == {"name": cls._zarr_v3_name, "object_codec_id": None}:
return cls()
if zarr_format == 3 and data == cls._zarr_v3_name:
return cls()
raise DataTypeValidationError(
f"Invalid JSON representation of {cls.__name__}. Got {data!r}"
)
def to_json(self, zarr_format: ZarrFormat) -> str | DTypeSpec_V2: # type: ignore[override]
if zarr_format == 2:
return {"name": self._zarr_v3_name, "object_codec_id": None}
if zarr_format == 3:
return self._zarr_v3_name
raise ValueError("zarr_format must be 2 or 3")
| TestDataType |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/prompt.py | {
"start": 737,
"end": 8774
} | class ____(Generic[PromptType]):
"""Ask the user for input until a valid response is received. This is the base class, see one of
the concrete classes for examples.
Args:
prompt (TextType, optional): Prompt text. Defaults to "".
console (Console, optional): A Console instance or None to use global console. Defaults to None.
password (bool, optional): Enable password input. Defaults to False.
choices (List[str], optional): A list of valid choices. Defaults to None.
show_default (bool, optional): Show default in prompt. Defaults to True.
show_choices (bool, optional): Show choices in prompt. Defaults to True.
"""
response_type: type = str
validate_error_message = "[prompt.invalid]Please enter a valid value"
illegal_choice_message = (
"[prompt.invalid.choice]Please select one of the available options"
)
prompt_suffix = ": "
choices: Optional[List[str]] = None
def __init__(
self,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
show_default: bool = True,
show_choices: bool = True,
) -> None:
self.console = console or get_console()
self.prompt = (
Text.from_markup(prompt, style="prompt")
if isinstance(prompt, str)
else prompt
)
self.password = password
if choices is not None:
self.choices = choices
self.show_default = show_default
self.show_choices = show_choices
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
show_default: bool = True,
show_choices: bool = True,
default: DefaultType,
stream: Optional[TextIO] = None,
) -> Union[DefaultType, PromptType]:
...
@classmethod
@overload
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
show_default: bool = True,
show_choices: bool = True,
stream: Optional[TextIO] = None,
) -> PromptType:
...
@classmethod
def ask(
cls,
prompt: TextType = "",
*,
console: Optional[Console] = None,
password: bool = False,
choices: Optional[List[str]] = None,
show_default: bool = True,
show_choices: bool = True,
default: Any = ...,
stream: Optional[TextIO] = None,
) -> Any:
"""Shortcut to construct and run a prompt loop and return the result.
Example:
>>> filename = Prompt.ask("Enter a filename")
Args:
prompt (TextType, optional): Prompt text. Defaults to "".
console (Console, optional): A Console instance or None to use global console. Defaults to None.
password (bool, optional): Enable password input. Defaults to False.
choices (List[str], optional): A list of valid choices. Defaults to None.
show_default (bool, optional): Show default in prompt. Defaults to True.
show_choices (bool, optional): Show choices in prompt. Defaults to True.
stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
"""
_prompt = cls(
prompt,
console=console,
password=password,
choices=choices,
show_default=show_default,
show_choices=show_choices,
)
return _prompt(default=default, stream=stream)
def render_default(self, default: DefaultType) -> Text:
"""Turn the supplied default in to a Text instance.
Args:
default (DefaultType): Default value.
Returns:
Text: Text containing rendering of default value.
"""
return Text(f"({default})", "prompt.default")
def make_prompt(self, default: DefaultType) -> Text:
"""Make prompt text.
Args:
default (DefaultType): Default value.
Returns:
Text: Text to display in prompt.
"""
prompt = self.prompt.copy()
prompt.end = ""
if self.show_choices and self.choices:
_choices = "/".join(self.choices)
choices = f"[{_choices}]"
prompt.append(" ")
prompt.append(choices, "prompt.choices")
if (
default != ...
and self.show_default
and isinstance(default, (str, self.response_type))
):
prompt.append(" ")
_default = self.render_default(default)
prompt.append(_default)
prompt.append(self.prompt_suffix)
return prompt
@classmethod
def get_input(
cls,
console: Console,
prompt: TextType,
password: bool,
stream: Optional[TextIO] = None,
) -> str:
"""Get input from user.
Args:
console (Console): Console instance.
prompt (TextType): Prompt text.
password (bool): Enable password entry.
Returns:
str: String from user.
"""
return console.input(prompt, password=password, stream=stream)
def check_choice(self, value: str) -> bool:
"""Check value is in the list of valid choices.
Args:
value (str): Value entered by user.
Returns:
bool: True if choice was valid, otherwise False.
"""
assert self.choices is not None
return value.strip() in self.choices
def process_response(self, value: str) -> PromptType:
"""Process response from user, convert to prompt type.
Args:
value (str): String typed by user.
Raises:
InvalidResponse: If ``value`` is invalid.
Returns:
PromptType: The value to be returned from ask method.
"""
value = value.strip()
try:
return_value: PromptType = self.response_type(value)
except ValueError:
raise InvalidResponse(self.validate_error_message)
if self.choices is not None and not self.check_choice(value):
raise InvalidResponse(self.illegal_choice_message)
return return_value
def on_validate_error(self, value: str, error: InvalidResponse) -> None:
"""Called to handle validation error.
Args:
value (str): String entered by user.
error (InvalidResponse): Exception instance the initiated the error.
"""
self.console.print(error)
def pre_prompt(self) -> None:
"""Hook to display something before the prompt."""
@overload
def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
...
@overload
def __call__(
self, *, default: DefaultType, stream: Optional[TextIO] = None
) -> Union[PromptType, DefaultType]:
...
def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
"""Run the prompt loop.
Args:
default (Any, optional): Optional default value.
Returns:
PromptType: Processed value.
"""
while True:
self.pre_prompt()
prompt = self.make_prompt(default)
value = self.get_input(self.console, prompt, self.password, stream=stream)
if value == "" and default != ...:
return default
try:
return_value = self.process_response(value)
except InvalidResponse as error:
self.on_validate_error(value, error)
continue
else:
return return_value
| PromptBase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F811_30.py | {
"start": 209,
"end": 292
} | class ____:
"""B."""
def baz(self) -> None:
"""Baz."""
baz = 1
| B |
python | pypa__warehouse | warehouse/utils/html.py | {
"start": 99,
"end": 2281
} | class ____(Extension):
"""
This extension adds support for a "Client side Include", which will be
included into the final page using javascript instead of on the server. It
is used like:
{% csi "/some/url/" %}
{% endcsi %}
Which will render as an empty div that will be replaced using javascript.
You may place default content inside of this div (in case js is disabled,
the include fails for some reason, or to render before the include happens)
by filling in the area between the two block tags, thus something like:
{% csi "/some/url/" %}
<p>You need Javascript Enabled to See this Content</p>
{% endcsi %}
Would render a client side include that instructs the user they need JS
if the CSI doesn't happen for one reason or another.
"""
tags = {"csi"}
def parse(self, parser):
# the first token is the token that started the tag. In our case
# we only listen to ``'csi'`` so this will be a name token with
# `csi` as value. We get the line number so that we can give that line
# number to the nodes we create by hand.
lineno = next(parser.stream).lineno
# Now we parse a single expression that is used as the URL we're going
# to include
args = [parser.parse_expression()]
# if there is a comma, the user provided a tag type. If not use
# 'div' as second parameter.
if parser.stream.skip_if("comma"):
args.append(parser.parse_expression())
else:
args.append(nodes.Const("div"))
# Now we parse the body of the csi block up to `endcsi` and drop the
# needle (which would always be `endcsi` in that case).
body = parser.parse_statements(["name:endcsi"], drop_needle=True)
# Now return a `CallBlock` node that calls our _csi helper method on
# this extension.
n = nodes.CallBlock(self.call_method("_csi", args), [], [], body)
n = n.set_lineno(lineno)
return n
def _csi(self, url, tag, caller):
return f'<{tag} data-html-include="{url}">{caller()}</{tag}>'
| ClientSideIncludeExtension |
python | lxml__lxml | src/lxml/tests/test_http_io.py | {
"start": 441,
"end": 4489
} | class ____(HelperTestCase):
etree = etree
def _parse_from_http(self, data, code=200, headers=None):
parser = self.etree.XMLParser(no_network=False)
handler = HTTPRequestCollector(data, code, headers)
with webserver(handler) as host_url:
tree = self.etree.parse(host_url + 'TEST', parser=parser)
self.assertEqual([('/TEST', [])], handler.requests)
return tree
@needs_http
def test_http_client(self):
tree = self._parse_from_http(b'<root><a/></root>')
self.assertEqual('root', tree.getroot().tag)
self.assertEqual('a', tree.getroot()[0].tag)
@needs_http
def test_http_client_404(self):
try:
self._parse_from_http(b'<root/>', code=404)
except OSError:
self.assertTrue(True)
else:
self.assertTrue(False, "expected IOError")
@needs_http
def test_http_client_gzip(self):
f = BytesIO()
gz = gzip.GzipFile(fileobj=f, mode='w', filename='test.xml')
gz.write(b'<root><a/></root>')
gz.close()
data = f.getvalue()
del f, gz
headers = [('Content-Encoding', 'gzip')]
tree = self._parse_from_http(data, headers=headers)
self.assertEqual('root', tree.getroot().tag)
self.assertEqual('a', tree.getroot()[0].tag)
@needs_http
def test_parser_input_mix(self):
data = b'<root><a/></root>'
handler = HTTPRequestCollector(data)
parser = self.etree.XMLParser(no_network=False)
with webserver(handler) as host_url:
tree = self.etree.parse(host_url, parser=parser)
root = tree.getroot()
self.assertEqual('a', root[0].tag)
root = self.etree.fromstring(data)
self.assertEqual('a', root[0].tag)
tree = self.etree.parse(host_url, parser=parser)
root = tree.getroot()
self.assertEqual('a', root[0].tag)
root = self.etree.fromstring(data)
self.assertEqual('a', root[0].tag)
root = self.etree.fromstring(data)
self.assertEqual('a', root[0].tag)
@needs_http
def test_network_dtd(self):
data = [_bytes(textwrap.dedent(s)) for s in [
# XML file
'''\
<?xml version="1.0"?>
<!DOCTYPE root SYSTEM "./file.dtd">
<root>&myentity;</root>
''',
# DTD
'<!ENTITY myentity "DEFINED">',
]]
responses = []
def handler(environ, start_response):
start_response('200 OK', [])
return [responses.pop()]
with webserver(handler) as host_url:
# DTD network loading enabled
responses = data[::-1]
tree = self.etree.parse(
host_url + 'dir/test.xml',
parser=self.etree.XMLParser(
load_dtd=True, no_network=False))
self.assertFalse(responses) # all read
root = tree.getroot()
self.assertEqual('DEFINED', root.text)
# DTD network loading disabled
responses = data[::-1]
try:
self.etree.parse(
host_url + 'dir/test.xml',
parser=self.etree.XMLParser(
load_dtd=True, no_network=True))
except self.etree.XMLSyntaxError:
self.assertTrue("myentity" in str(sys.exc_info()[1]))
self.assertEqual(1, len(responses)) # DTD not read
except OSError:
self.assertTrue("failed to load" in str(sys.exc_info()[1]))
self.assertEqual(2, len(responses)) # nothing read
else:
self.assertTrue(False)
def test_suite():
suite = unittest.TestSuite()
if not IS_PYPY:
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(HttpIOTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| HttpIOTestCase |
python | django__django | tests/tasks/test_dummy_backend.py | {
"start": 7067,
"end": 7659
} | class ____(TransactionTestCase):
available_apps = []
@override_settings(
TASKS={
"default": {
"BACKEND": "django.tasks.backends.dummy.DummyBackend",
}
}
)
def test_doesnt_wait_until_transaction_commit_by_default(self):
with transaction.atomic():
result = test_tasks.noop_task.enqueue()
self.assertIsNotNone(result.enqueued_at)
self.assertEqual(len(default_task_backend.results), 1)
self.assertEqual(len(default_task_backend.results), 1)
| DummyBackendTransactionTestCase |
python | numpy__numpy | benchmarks/benchmarks/bench_core.py | {
"start": 6330,
"end": 7156
} | class ____(Benchmark):
params = [['int64', 'uint64', 'float32', 'float64',
'complex64', 'bool_'],
[100, 10000]]
param_names = ['dtype', 'size']
def setup(self, dtype, size):
self.data = np.ones(size, dtype=dtype)
if dtype.startswith('complex'):
self.data = np.random.randn(size) + 1j * np.random.randn(size)
def time_min(self, dtype, size):
self.data.min()
def time_max(self, dtype, size):
self.data.max()
def time_mean(self, dtype, size):
self.data.mean()
def time_std(self, dtype, size):
self.data.std()
def time_prod(self, dtype, size):
self.data.prod()
def time_var(self, dtype, size):
self.data.var()
def time_sum(self, dtype, size):
self.data.sum()
| StatsMethods |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 260,
"end": 360
} | class ____(TypedDict, total=False):
dateStarted: str | None
url: str | None
| LastDeployOptional |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 90307,
"end": 92732
} | class ____(Request):
"""
Get all 'plot' events for this task
:param task: Task ID
:type task: str
:param iters: Max number of latest iterations for which to return debug images
:type iters: int
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
"""
_service = "events"
_action = "get_task_plots"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"iters": {
"description": "Max number of latest iterations for which to return debug images",
"type": "integer",
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, iters: Optional[int] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(GetTaskPlotsRequest, self).__init__(**kwargs)
self.task = task
self.iters = iters
self.scroll_id = scroll_id
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iters")
def iters(self) -> Optional[int]:
return self._property_iters
@iters.setter
def iters(self, value: Optional[int]) -> None:
if value is None:
self._property_iters = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iters", six.integer_types)
self._property_iters = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetTaskPlotsRequest |
python | django__django | tests/defer/models.py | {
"start": 70,
"end": 192
} | class ____(models.Model):
first = models.CharField(max_length=50)
second = models.CharField(max_length=50)
| Secondary |
python | huggingface__transformers | src/transformers/models/wav2vec2/tokenization_wav2vec2.py | {
"start": 3303,
"end": 4365
} | class ____(ModelOutput):
"""
Output type of [` Wav2Vec2CTCTokenizer`], with transcription.
Args:
text (list of `str` or `str`):
Decoded logits in text from. Usually the speech transcription.
char_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`):
Offsets of the decoded characters. In combination with sampling rate and model downsampling rate char
offsets can be used to compute time stamps for each character. Total logit score of the beam associated with
produced text.
word_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`):
Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets
can be used to compute time stamps for each word.
"""
text: Union[list[str], str]
char_offsets: Union[list[ListOfDict], ListOfDict] = None
word_offsets: Union[list[ListOfDict], ListOfDict] = None
| Wav2Vec2CTCTokenizerOutput |
python | ansible__ansible | test/units/plugins/connection/test_connection.py | {
"start": 943,
"end": 1493
} | class ____(ConnectionBase):
@property
def transport(self):
"""This method is never called by unit tests."""
def _connect(self):
"""This method is never called by unit tests."""
def exec_command(self):
"""This method is never called by unit tests."""
def put_file(self):
"""This method is never called by unit tests."""
def fetch_file(self):
"""This method is never called by unit tests."""
def close(self):
"""This method is never called by unit tests."""
| NoOpConnection |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-zigzag-level-order-traversal.py | {
"start": 154,
"end": 787
} | class ____(object):
# @param root, a tree node
# @return a list of lists of integers
def zigzagLevelOrder(self, root):
if root is None:
return []
result, current = [], [root]
while current:
next_level, vals = [], []
for node in current:
vals.append(node.val)
if node.left:
next_level.append(node.left)
if node.right:
next_level.append(node.right)
result.append(vals[::-1] if len(result) % 2 else vals)
current = next_level
return result
| Solution |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/evaluator.py | {
"start": 3932,
"end": 5617
} | class ____(object):
"""Evaluates Python expressions using debug tensor values from a dump."""
def __init__(self, dump):
"""Constructor of ExpressionEvaluator.
Args:
dump: an instance of `DebugDumpDir`.
"""
self._dump = dump
self._cached_tensor_values = {}
def evaluate(self, expression):
"""Parse an expression.
Args:
expression: the expression to be parsed.
Returns:
The result of the evaluation.
Raises:
ValueError: If the value of one or more of the debug tensors in the
expression are not available.
"""
dump_tensors_iter = re.finditer(_DUMP_TENSOR_PATTERN, expression)
rewritten_expression = expression
for match in reversed(list(dump_tensors_iter)):
tensor_name = match.group(0)[1:-1].strip()
device_name, node_name, output_slot, debug_op, exec_index = (
_parse_debug_tensor_name(tensor_name))
if tensor_name not in self._cached_tensor_values:
try:
value = self._dump.get_tensors(
node_name, output_slot, debug_op,
device_name=device_name)[exec_index]
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
raise ValueError(
"Eval failed due to the value of %s:%d:DebugIdentity being "
"unavailable" % (node_name, output_slot))
self._cached_tensor_values[tensor_name] = value
rewritten_expression = (
rewritten_expression[:match.start(0)] +
"self._cached_tensor_values['" + tensor_name + "']" +
rewritten_expression[match.end(0):])
return eval(rewritten_expression) # pylint: disable=eval-used
| ExpressionEvaluator |
python | huggingface__transformers | tests/models/sam3_video/test_modeling_sam3_video.py | {
"start": 1213,
"end": 23879
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
checkpoint_path = "facebook/sam3"
self.video_model = Sam3VideoModel.from_pretrained(checkpoint_path).to(torch.float32)
self.processor = Sam3VideoProcessor.from_pretrained(checkpoint_path)
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_video_propagate_with_text_prompt(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(
video=raw_video,
inference_device=torch_device,
processing_device="cpu",
video_storage_device="cpu",
)
# Add text prompt
text = "person"
inference_session = self.processor.add_text_prompt(
inference_session=inference_session,
text=text,
)
# Propagate through video frames
outputs_per_frame = {}
model_outputs_per_frame = {}
for model_outputs in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=3,
):
processed_outputs = self.processor.postprocess_outputs(inference_session, model_outputs)
outputs_per_frame[model_outputs.frame_idx] = processed_outputs
model_outputs_per_frame[model_outputs.frame_idx] = model_outputs
# Check we processed the expected number of frames
self.assertGreaterEqual(len(outputs_per_frame), 1)
self.assertLessEqual(len(outputs_per_frame), 4) # frame 0 + up to 3 more
# Check output structure for each frame
for processed_outputs in outputs_per_frame.values():
self.assertIn("object_ids", processed_outputs)
self.assertIn("scores", processed_outputs)
self.assertIn("boxes", processed_outputs)
self.assertIn("masks", processed_outputs)
num_objects = len(processed_outputs["object_ids"])
if num_objects > 0:
self.assertEqual(processed_outputs["scores"].shape, (num_objects,))
self.assertEqual(processed_outputs["boxes"].shape, (num_objects, 4))
self.assertEqual(
processed_outputs["masks"].shape, (num_objects, raw_video.shape[-3], raw_video.shape[-2])
)
# Check boxes are in XYXY format (absolute coordinates)
boxes = processed_outputs["boxes"]
self.assertTrue(torch.all(boxes[:, 2] >= boxes[:, 0])) # x2 >= x1
self.assertTrue(torch.all(boxes[:, 3] >= boxes[:, 1])) # y2 >= y1
# Check numeric values for first frame
if len(outputs_per_frame) > 0:
first_frame_idx = min(outputs_per_frame.keys())
first_outputs = outputs_per_frame[first_frame_idx]
num_objects = len(first_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison (postprocess_outputs may return CPU tensors)
object_ids = (
first_outputs["object_ids"].cpu()
if isinstance(first_outputs["object_ids"], torch.Tensor)
else torch.tensor(first_outputs["object_ids"])
)
scores = (
first_outputs["scores"].cpu()
if isinstance(first_outputs["scores"], torch.Tensor)
else torch.tensor(first_outputs["scores"])
)
boxes = (
first_outputs["boxes"].cpu()
if isinstance(first_outputs["boxes"], torch.Tensor)
else torch.tensor(first_outputs["boxes"])
)
masks = (
first_outputs["masks"].cpu()
if isinstance(first_outputs["masks"], torch.Tensor)
else torch.tensor(first_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.968647837638855, 0.9736108779907227], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([146.0, 135.0, 291.0, 404.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for first frame
if len(model_outputs_per_frame) > 0:
first_frame_idx = min(model_outputs_per_frame.keys())
first_model_outputs = model_outputs_per_frame[first_frame_idx]
num_objects = len(first_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = first_model_outputs.object_ids[0]
raw_mask = first_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-2.952317476272583, -5.94632625579834, -7.991223335266113],
[-6.916913986206055, -10.058566093444824, -11.114638328552246],
[-8.195585250854492, -9.787644386291504, -10.39273452758789],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
# Check numeric values for last frame (to verify propagation consistency)
if len(outputs_per_frame) > 1:
last_frame_idx = max(outputs_per_frame.keys())
last_outputs = outputs_per_frame[last_frame_idx]
num_objects = len(last_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison
object_ids = (
last_outputs["object_ids"].cpu()
if isinstance(last_outputs["object_ids"], torch.Tensor)
else torch.tensor(last_outputs["object_ids"])
)
scores = (
last_outputs["scores"].cpu()
if isinstance(last_outputs["scores"], torch.Tensor)
else torch.tensor(last_outputs["scores"])
)
boxes = (
last_outputs["boxes"].cpu()
if isinstance(last_outputs["boxes"], torch.Tensor)
else torch.tensor(last_outputs["boxes"])
)
masks = (
last_outputs["masks"].cpu()
if isinstance(last_outputs["masks"], torch.Tensor)
else torch.tensor(last_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.968647837638855, 0.9736108779907227], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([157.0, 116.0, 295.0, 382.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for last frame
if len(model_outputs_per_frame) > 1:
last_frame_idx = max(model_outputs_per_frame.keys())
last_model_outputs = model_outputs_per_frame[last_frame_idx]
num_objects = len(last_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = last_model_outputs.object_ids[0]
raw_mask = last_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-23.023313522338867, -27.02887535095215, -22.29985237121582],
[-24.373233795166016, -31.428438186645508, -24.268810272216797],
[-24.550016403198242, -32.607383728027344, -26.500947952270508],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
def test_inference_video_streaming_with_text_prompt(self):
raw_video = prepare_video()
# Initialize session for streaming (no video provided)
inference_session = self.processor.init_video_session(
inference_device=torch_device,
processing_device="cpu",
video_storage_device="cpu",
)
# Add text prompt
text = "person"
inference_session = self.processor.add_text_prompt(
inference_session=inference_session,
text=text,
)
# Process frames one by one (streaming mode)
outputs_per_frame = {}
model_outputs_per_frame = {}
max_frame_num_to_track = 3
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
# Process frame using processor
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
# Process frame using streaming inference
model_outputs = self.video_model(
inference_session=inference_session,
frame=inputs.pixel_values[0], # Provide processed frame - this enables streaming mode
reverse=False,
)
# Post-process outputs with original_sizes for proper resolution handling
processed_outputs = self.processor.postprocess_outputs(
inference_session,
model_outputs,
original_sizes=inputs.original_sizes, # Required for streaming inference
)
outputs_per_frame[frame_idx] = processed_outputs
model_outputs_per_frame[frame_idx] = model_outputs
# Check we processed the expected number of frames
self.assertEqual(len(outputs_per_frame), max_frame_num_to_track)
# Check output structure for each frame
for frame_idx, processed_outputs in outputs_per_frame.items():
self.assertIn("object_ids", processed_outputs)
self.assertIn("scores", processed_outputs)
self.assertIn("boxes", processed_outputs)
self.assertIn("masks", processed_outputs)
num_objects = len(processed_outputs["object_ids"])
if num_objects > 0:
self.assertEqual(processed_outputs["scores"].shape, (num_objects,))
self.assertEqual(processed_outputs["boxes"].shape, (num_objects, 4))
# For streaming, masks should be at original frame resolution
H_orig, W_orig = raw_video[frame_idx].shape[0], raw_video[frame_idx].shape[1]
self.assertEqual(processed_outputs["masks"].shape, (num_objects, H_orig, W_orig))
# Check boxes are in XYXY format (absolute coordinates)
boxes = processed_outputs["boxes"]
self.assertTrue(torch.all(boxes[:, 2] >= boxes[:, 0])) # x2 >= x1
self.assertTrue(torch.all(boxes[:, 3] >= boxes[:, 1])) # y2 >= y1
# Check numeric values for first frame
if len(outputs_per_frame) > 0:
first_frame_idx = min(outputs_per_frame.keys())
first_outputs = outputs_per_frame[first_frame_idx]
num_objects = len(first_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison (postprocess_outputs may return CPU tensors)
object_ids = (
first_outputs["object_ids"].cpu()
if isinstance(first_outputs["object_ids"], torch.Tensor)
else torch.tensor(first_outputs["object_ids"])
)
scores = (
first_outputs["scores"].cpu()
if isinstance(first_outputs["scores"], torch.Tensor)
else torch.tensor(first_outputs["scores"])
)
boxes = (
first_outputs["boxes"].cpu()
if isinstance(first_outputs["boxes"], torch.Tensor)
else torch.tensor(first_outputs["boxes"])
)
masks = (
first_outputs["masks"].cpu()
if isinstance(first_outputs["masks"], torch.Tensor)
else torch.tensor(first_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.9683944582939148, 0.9740181565284729], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([146.0, 135.0, 291.0, 404.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for first frame
if len(model_outputs_per_frame) > 0:
first_frame_idx = min(model_outputs_per_frame.keys())
first_model_outputs = model_outputs_per_frame[first_frame_idx]
num_objects = len(first_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = first_model_outputs.object_ids[0]
raw_mask = first_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-2.987567901611328, -5.944897651672363, -7.973854064941406],
[-7.017378330230713, -10.088018417358398, -11.089308738708496],
[-8.274458885192871, -9.851463317871094, -10.428947448730469],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
# Check numeric values for last frame (to verify propagation consistency)
if len(outputs_per_frame) > 1:
last_frame_idx = max(outputs_per_frame.keys())
last_outputs = outputs_per_frame[last_frame_idx]
num_objects = len(last_outputs["object_ids"])
if num_objects > 0:
# Move outputs to CPU for comparison
object_ids = (
last_outputs["object_ids"].cpu()
if isinstance(last_outputs["object_ids"], torch.Tensor)
else torch.tensor(last_outputs["object_ids"])
)
scores = (
last_outputs["scores"].cpu()
if isinstance(last_outputs["scores"], torch.Tensor)
else torch.tensor(last_outputs["scores"])
)
boxes = (
last_outputs["boxes"].cpu()
if isinstance(last_outputs["boxes"], torch.Tensor)
else torch.tensor(last_outputs["boxes"])
)
masks = (
last_outputs["masks"].cpu()
if isinstance(last_outputs["masks"], torch.Tensor)
else torch.tensor(last_outputs["masks"])
)
torch.testing.assert_close(
object_ids,
torch.tensor([0, 1], dtype=torch.int64),
)
torch.testing.assert_close(
scores,
torch.tensor([0.9683944582939148, 0.9740181565284729], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
boxes[0],
torch.tensor([154.0, 117.0, 294.0, 395.0], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
masks[0, :3, :3].float(),
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32),
atol=1e-4,
rtol=1e-4,
)
# Check raw model_outputs mask values for last frame
if len(model_outputs_per_frame) > 1:
last_frame_idx = max(model_outputs_per_frame.keys())
last_model_outputs = model_outputs_per_frame[last_frame_idx]
num_objects = len(last_model_outputs.object_ids)
if num_objects > 0:
# Check raw mask from model_outputs (low-resolution, before post-processing)
first_obj_id = last_model_outputs.object_ids[0]
raw_mask = last_model_outputs.obj_id_to_mask[first_obj_id].cpu()
torch.testing.assert_close(
raw_mask[:1, :3, :3].float(),
torch.tensor(
[
[
[-23.935535430908203, -27.967025756835938, -23.519914627075195],
[-25.742399215698242, -32.65046310424805, -24.71213150024414],
[-25.263212203979492, -33.807132720947266, -27.463823318481445],
]
],
dtype=torch.float32,
),
atol=5e-3, # Higher tolerance for raw logits
rtol=5e-3,
)
def test_inference_video_multi_prompt(self):
"""Test multi-prompt tracking - detecting multiple object categories in one pass."""
raw_video = prepare_video()
inference_session = self.processor.init_video_session(
video=raw_video,
inference_device=torch_device,
processing_device="cpu",
video_storage_device="cpu",
)
# Add multiple text prompts
prompts = ["person", "bed"]
self.processor.add_text_prompt(
inference_session=inference_session,
text=prompts,
)
# Propagate through video frames
outputs_per_frame = {}
for model_outputs in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=3,
):
processed_outputs = self.processor.postprocess_outputs(inference_session, model_outputs)
outputs_per_frame[model_outputs.frame_idx] = processed_outputs
# Check we processed the expected number of frames
self.assertGreaterEqual(len(outputs_per_frame), 1)
self.assertLessEqual(len(outputs_per_frame), 4)
# Check output structure for each frame
for processed_outputs in outputs_per_frame.values():
self.assertIn("object_ids", processed_outputs)
self.assertIn("scores", processed_outputs)
self.assertIn("boxes", processed_outputs)
self.assertIn("masks", processed_outputs)
self.assertIn("prompt_to_obj_ids", processed_outputs) # Multi-prompt specific
# Check prompt_to_obj_ids structure
prompt_to_obj_ids = processed_outputs["prompt_to_obj_ids"]
self.assertIsInstance(prompt_to_obj_ids, dict)
for prompt, obj_ids in prompt_to_obj_ids.items():
self.assertIsInstance(prompt, str)
self.assertIsInstance(obj_ids, list)
# Each object ID should be in the main object_ids list
for obj_id in obj_ids:
self.assertIn(obj_id, processed_outputs["object_ids"].tolist())
# Check that we detected objects from multiple prompts
first_frame_outputs = outputs_per_frame[min(outputs_per_frame.keys())]
prompt_to_obj_ids = first_frame_outputs["prompt_to_obj_ids"]
# Should have at least one prompt with detections
self.assertGreater(len(prompt_to_obj_ids), 0)
# All prompts in prompt_to_obj_ids should be from our original prompts
for prompt in prompt_to_obj_ids.keys():
self.assertIn(prompt, prompts)
| Sam3VideoModelIntegrationTest |
python | fluentpython__example-code | 14-it-generator/isis2json/iso2709.py | {
"start": 1126,
"end": 2215
} | class ____(object):
def __init__(self, filename, encoding = DEFAULT_ENCODING):
self.file = open(filename, 'rb')
self.encoding = encoding
def __iter__(self):
return self
def next(self):
return IsoRecord(self)
__next__ = next # Python 3 compatibility
def read(self, size):
''' read and drop all CR and LF characters '''
# TODO: this is inneficient but works, patches accepted!
# NOTE: our fixtures include files which have no linebreaks,
# files with CR-LF linebreaks and files with LF linebreaks
chunks = []
count = 0
while count < size:
chunk = self.file.read(size-count)
if len(chunk) == 0:
break
chunk = chunk.replace(CR+LF,'')
if CR in chunk:
chunk = chunk.replace(CR,'')
if LF in chunk:
chunk = chunk.replace(LF,'')
count += len(chunk)
chunks.append(chunk)
return ''.join(chunks)
def close(self):
self.file.close()
| IsoFile |
python | scrapy__scrapy | scrapy/exceptions.py | {
"start": 960,
"end": 1354
} | class ____(Exception):
"""
Stop the download of the body for a given response.
The 'fail' boolean parameter indicates whether or not the resulting partial response
should be handled by the request errback. Note that 'fail' is a keyword-only argument.
"""
def __init__(self, *, fail: bool = True):
super().__init__()
self.fail = fail
# Items
| StopDownload |
python | apache__avro | lang/py/avro/schema.py | {
"start": 27712,
"end": 30380
} | class ____(EqualByJsonMixin, Schema):
"""
names is a dictionary of schema objects
"""
def __init__(self, schemas, names=None, validate_names: bool = True):
# Ensure valid ctor args
if not isinstance(schemas, list):
fail_msg = "Union schema requires a list of schemas."
raise avro.errors.SchemaParseException(fail_msg)
# Call parent ctor
Schema.__init__(self, "union", validate_names=validate_names)
# Add class members
schema_objects: List[Schema] = []
for schema in schemas:
if isinstance(schema, str) and names.has_name(schema, None):
new_schema = names.get_name(schema, None)
else:
try:
new_schema = make_avsc_object(schema, names, validate_names=self.validate_names)
except Exception as e:
raise avro.errors.SchemaParseException(f"Union item must be a valid Avro schema: {e}")
# check the new schema
if (
new_schema.type in avro.constants.VALID_TYPES
and new_schema.type not in avro.constants.NAMED_TYPES
and new_schema.type in [schema.type for schema in schema_objects]
):
raise avro.errors.SchemaParseException(f"{new_schema.type} type already in Union")
elif new_schema.type == "union":
raise avro.errors.SchemaParseException("Unions cannot contain other unions.")
else:
schema_objects.append(new_schema)
self._schemas = schema_objects
# read-only properties
@property
def schemas(self):
return self._schemas
def match(self, writer):
"""Return True if the current schema (as reader) matches the writer schema.
@arg writer: the schema to match against
@return bool
"""
return writer.type in {"union", "error_union"} or any(s.match(writer) for s in self.schemas)
def to_json(self, names=None):
names = names or Names(validate_names=self.validate_names)
to_dump = []
for schema in self.schemas:
to_dump.append(schema.to_json(names))
return to_dump
def to_canonical_json(self, names=None):
names = names or Names(validate_names=self.validate_names)
return [schema.to_canonical_json(names) for schema in self.schemas]
def validate(self, datum):
"""Return the first branch schema of which datum is a valid example, else None."""
return next((branch for branch in self.schemas if branch.validate(datum) is not None), None)
| UnionSchema |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 28782,
"end": 29251
} | class ____:
"""Holds counter state for invoking a method several times in a row."""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""Raise a NameError until after count threshold has been crossed.
Then return True.
"""
if self.counter < self.count:
self.counter += 1
raise NameError("Hi there, I'm a NameError")
return True
| NoNameErrorAfterCount |
python | kamyu104__LeetCode-Solutions | Python/removing-minimum-number-of-magic-beans.py | {
"start": 40,
"end": 275
} | class ____(object):
def minimumRemoval(self, beans):
"""
:type beans: List[int]
:rtype: int
"""
beans.sort()
return sum(beans) - max(x*(len(beans)-i)for i, x in enumerate(beans))
| Solution |
python | pandas-dev__pandas | pandas/core/indexes/period.py | {
"start": 2003,
"end": 18813
} | class ____(DatetimeIndexOpsMixin):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
dtype : str or PeriodDtype, default None
A dtype from which to extract a freq.
copy : bool
Make a copy of input ndarray.
name : str, default None
Name of the resulting PeriodIndex.
Attributes
----------
day
dayofweek
day_of_week
dayofyear
day_of_year
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
from_fields
from_ordinals
Raises
------
ValueError
Passing the parameter data as a list without specifying either freq or
dtype will raise a ValueError: "freq not specified and cannot be inferred"
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
>>> idx = pd.PeriodIndex(data=["2000Q1", "2002Q3"], freq="Q")
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
_typ = "periodindex"
_data: PeriodArray
freq: BaseOffset
dtype: PeriodDtype
_data_cls = PeriodArray
_supports_partial_string_indexing = True
@property
def _engine_type(self) -> type[libindex.PeriodEngine]:
return libindex.PeriodEngine
@cache_readonly
def _resolution_obj(self) -> Resolution:
# for compat with DatetimeIndex
return self.dtype._resolution_obj
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in Index
# These are defined here instead of via inherit_names for mypy
@doc(
PeriodArray.asfreq,
other="arrays.PeriodArray",
other_name="PeriodArray",
**_shared_doc_kwargs,
)
def asfreq(self, freq=None, how: str = "E") -> Self:
arr = self._data.asfreq(freq, how)
return type(self)._simple_new(arr, name=self.name)
@doc(PeriodArray.to_timestamp)
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex:
arr = self._data.to_timestamp(freq, how)
return DatetimeIndex._simple_new(arr, name=self.name)
@property
@doc(PeriodArray.hour.fget)
def hour(self) -> Index:
return Index(self._data.hour, name=self.name)
@property
@doc(PeriodArray.minute.fget)
def minute(self) -> Index:
return Index(self._data.minute, name=self.name)
@property
@doc(PeriodArray.second.fget)
def second(self) -> Index:
return Index(self._data.second, name=self.name)
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
freq=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable | None = None,
) -> Self:
refs = None
if not copy and isinstance(data, (Index, ABCSeries)):
refs = data._references
name = maybe_extract_name(name, data, cls)
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name, refs=refs)
@classmethod
def from_fields(
cls,
*,
year=None,
quarter=None,
month=None,
day=None,
hour=None,
minute=None,
second=None,
freq=None,
) -> Self:
"""
Construct a PeriodIndex from fields (year, month, day, etc.).
Parameters
----------
year : int, array, or Series, default None
Year for the PeriodIndex.
quarter : int, array, or Series, default None
Quarter for the PeriodIndex.
month : int, array, or Series, default None
Month for the PeriodIndex.
day : int, array, or Series, default None
Day for the PeriodIndex.
hour : int, array, or Series, default None
Hour for the PeriodIndex.
minute : int, array, or Series, default None
Minute for the PeriodIndex.
second : int, array, or Series, default None
Second for the PeriodIndex.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
Returns
-------
PeriodIndex
See Also
--------
PeriodIndex.from_ordinals : Construct a PeriodIndex from ordinals.
PeriodIndex.to_timestamp : Cast to DatetimeArray/Index.
Examples
--------
>>> idx = pd.PeriodIndex.from_fields(year=[2000, 2002], quarter=[1, 3])
>>> idx
PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]')
"""
fields = {
"year": year,
"quarter": quarter,
"month": month,
"day": day,
"hour": hour,
"minute": minute,
"second": second,
}
fields = {key: value for key, value in fields.items() if value is not None}
arr = PeriodArray._from_fields(fields=fields, freq=freq)
return cls._simple_new(arr)
@classmethod
def from_ordinals(cls, ordinals, *, freq, name=None) -> Self:
"""
Construct a PeriodIndex from ordinals.
Parameters
----------
ordinals : array-like of int
The period offsets from the proleptic Gregorian epoch.
freq : str or period object
One of pandas period strings or corresponding objects.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
See Also
--------
PeriodIndex.from_fields : Construct a PeriodIndex from fields
(year, month, day, etc.).
PeriodIndex.to_timestamp : Cast to DatetimeArray/Index.
Examples
--------
>>> idx = pd.PeriodIndex.from_ordinals([-1, 0, 1], freq="Q")
>>> idx
PeriodIndex(['1969Q4', '1970Q1', '1970Q2'], dtype='period[Q-DEC]')
"""
ordinals = np.asarray(ordinals, dtype=np.int64)
dtype = PeriodDtype(freq)
data = PeriodArray._simple_new(ordinals, dtype=dtype)
return cls._simple_new(data, name=name)
# ------------------------------------------------------------------------
# Data
@property
def values(self) -> npt.NDArray[np.object_]:
return np.asarray(self, dtype=object)
def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, (Tick, Day)):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
assert isinstance(other, int)
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
return self.dtype == dtype
# ------------------------------------------------------------------------
# Index Methods
def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic_increasing:
raise ValueError("Index is not monotonic")
values = self.asi8
return bool(((values[1:] - values[:-1]) < 2).all())
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
# ------------------------------------------------------------------------
# Indexing Methods
def _convert_tolerance(self, tolerance, target):
# Returned tolerance must be in dtype/units so that
# `|self._get_engine_target() - target._engine_target()| <= tolerance`
# is meaningful. Since PeriodIndex returns int64 for engine_target,
# we may need to convert timedelta64 tolerance to int64.
tolerance = super()._convert_tolerance(tolerance, target)
if self.dtype == target.dtype:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return tolerance
def get_loc(self, key):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parsable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
self._check_indexing_error(key)
if is_valid_na_for_dtype(key, self.dtype):
key = NaT
elif isinstance(key, str):
try:
parsed, reso = self._parse_with_reso(key)
except ValueError as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
if self._can_partial_date_slice(reso):
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
if reso == self._resolution_obj:
# the reso < self._resolution_obj case goes
# through _get_string_slice
key = self._cast_partial_indexing_scalar(parsed)
else:
raise KeyError(key)
elif isinstance(key, Period):
self._disallow_mismatched_indexing(key)
elif isinstance(key, datetime):
key = self._cast_partial_indexing_scalar(key)
else:
# in particular integer, which Period constructor would cast to string
raise KeyError(key)
try:
return Index.get_loc(self, key)
except KeyError as err:
raise KeyError(orig_key) from err
def _disallow_mismatched_indexing(self, key: Period) -> None:
if key._dtype != self.dtype:
raise KeyError(key)
def _cast_partial_indexing_scalar(self, label: datetime) -> Period:
try:
period = Period(label, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(label) from err
return period
@doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str):
if isinstance(label, datetime):
label = self._cast_partial_indexing_scalar(label)
return super()._maybe_cast_slice_bound(label, side)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)
iv = Period(parsed, freq=freq)
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
@doc(DatetimeIndexOpsMixin.shift)
def shift(self, periods: int = 1, freq=None) -> Self:
if freq is not None:
raise TypeError(
f"`freq` argument is not supported for {type(self).__name__}.shift"
)
return self + periods
@set_module("pandas")
def period_range(
start=None,
end=None,
periods: int | None = None,
freq=None,
name: Hashable | None = None,
) -> PeriodIndex:
"""
Return a fixed frequency PeriodIndex.
The day (calendar) is the default frequency.
Parameters
----------
start : str, datetime, date, pandas.Timestamp, or period-like, default None
Left bound for generating periods.
end : str, datetime, date, pandas.Timestamp, or period-like, default None
Right bound for generating periods.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : str, default None
Name of the resulting PeriodIndex.
Returns
-------
PeriodIndex
A PeriodIndex of fixed frequency periods.
See Also
--------
date_range : Returns a fixed frequency DatetimeIndex.
Period : Represents a period of time.
PeriodIndex : Immutable ndarray holding ordinal values indicating regular periods
in time.
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see
:ref:`this link<timeseries.offset_aliases>`.
Examples
--------
>>> pd.period_range(start="2017-01-01", end="2018-01-01", freq="M")
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',
'2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',
'2018-01'],
dtype='period[M]')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(
... start=pd.Period("2017Q1", freq="Q"),
... end=pd.Period("2017Q2", freq="Q"),
... freq="M",
... )
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError(
"Of the three parameters: start, end, and periods, "
"exactly two must be specified"
)
if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):
freq = "D"
data, freq = PeriodArray._generate_range(start, end, periods, freq)
dtype = PeriodDtype(freq)
data = PeriodArray(data, dtype=dtype)
return PeriodIndex(data, name=name)
| PeriodIndex |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call_param.py | {
"start": 1484,
"end": 1631
} | class ____(TypedDict, total=False):
x: Required[int]
"""The x-coordinate."""
y: Required[int]
"""The y-coordinate."""
| ActionDragPath |
python | spack__spack | lib/spack/spack/cmd/__init__.py | {
"start": 20998,
"end": 21246
} | class ____(spack.error.SpackError):
"""Exception class thrown for impermissible python names"""
def __init__(self, name):
self.name = name
super().__init__("{0} is not a permissible Python name.".format(name))
| PythonNameError |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/ai.py | {
"start": 6658,
"end": 8905
} | class ____:
"""Output channel that prints to stdout using click.echo."""
def write(self, text: str) -> None:
click.echo(text)
@contextmanager
def enter_waiting_phase(phase_name: str, spin: bool = True) -> Iterator["OutputChannel"]:
"""Enter a phase of non interactivity where we wait for the CLI agent to complete its work.
This yields an OutputChannel that coordinates with a loading indicator unless spin is disabled.
"""
if spin:
with daggy_spinner_context(phase_name) as spinner:
yield spinner
else:
channel = PrintOutputChannel()
channel.write(phase_name)
yield channel
def scaffold_content_for_plan(
plan: str,
input_type: type[InputType],
diagnostics: ClaudeDiagnostics,
verbose: bool,
model: ModelType,
use_spinner: bool = True,
) -> None:
"""Scaffolds content from the plan generated by the planning phase."""
ensure_claude_sdk_python_version()
from claude_code_sdk.types import ResultMessage
from dagster_dg_cli.cli.scaffold.branch.claude.sdk_client import ClaudeSDKClient
prompt = load_scaffolding_prompt(plan)
allowed_tools = get_allowed_commands_scaffolding() + input_type.additional_allowed_tools()
with enter_waiting_phase("Scaffolding", spin=use_spinner) as channel:
with diagnostics.claude_operation(
operation_name="content_scaffolding",
error_code="content_scaffolding_failed",
error_message="Content scaffolding failed with SDK",
):
claude_sdk = ClaudeSDKClient(diagnostics)
messages = asyncio.run(
claude_sdk.scaffold_with_streaming(
prompt=prompt,
model=model,
allowed_tools=allowed_tools,
output_channel=channel,
disallowed_tools=["Bash(python:*)", "WebSearch", "WebFetch"],
verbose=verbose,
)
)
for message in messages:
if isinstance(message, ResultMessage):
click.echo(
f"✅ Scaffolding completed (${message.total_cost_usd:.2f}, {format_duration(message.duration_ms)})."
)
| PrintOutputChannel |
python | sympy__sympy | sympy/utilities/decorator.py | {
"start": 3055,
"end": 11184
} | class ____:
"""Don't 'inherit' certain attributes from a base class
>>> from sympy.utilities.decorator import no_attrs_in_subclass
>>> class A(object):
... x = 'test'
>>> A.x = no_attrs_in_subclass(A, A.x)
>>> class B(A):
... pass
>>> hasattr(A, 'x')
True
>>> hasattr(B, 'x')
False
"""
def __init__(self, cls, f):
self.cls = cls
self.f = f
def __get__(self, instance, owner=None):
if owner == self.cls:
if hasattr(self.f, '__get__'):
return self.f.__get__(instance, owner)
return self.f
raise AttributeError
def doctest_depends_on(exe=None, modules=None, disable_viewers=None,
python_version=None, ground_types=None):
"""
Adds metadata about the dependencies which need to be met for doctesting
the docstrings of the decorated objects.
``exe`` should be a list of executables
``modules`` should be a list of modules
``disable_viewers`` should be a list of viewers for :func:`~sympy.printing.preview.preview` to disable
``python_version`` should be the minimum Python version required, as a tuple
(like ``(3, 0)``)
"""
dependencies = {}
if exe is not None:
dependencies['executables'] = exe
if modules is not None:
dependencies['modules'] = modules
if disable_viewers is not None:
dependencies['disable_viewers'] = disable_viewers
if python_version is not None:
dependencies['python_version'] = python_version
if ground_types is not None:
dependencies['ground_types'] = ground_types
def skiptests():
from sympy.testing.runtests import DependencyError, SymPyDocTests, PyTestReporter # lazy import
r = PyTestReporter()
t = SymPyDocTests(r, None)
try:
t._check_dependencies(**dependencies)
except DependencyError:
return True # Skip doctests
else:
return False # Run doctests
def depends_on_deco(fn):
fn._doctest_depends_on = dependencies
fn.__doctest_skip__ = skiptests
if inspect.isclass(fn):
fn._doctest_depdends_on = no_attrs_in_subclass(
fn, fn._doctest_depends_on)
fn.__doctest_skip__ = no_attrs_in_subclass(
fn, fn.__doctest_skip__)
return fn
return depends_on_deco
def public(obj: T) -> T:
"""
Append ``obj``'s name to global ``__all__`` variable (call site).
By using this decorator on functions or classes you achieve the same goal
as by filling ``__all__`` variables manually, you just do not have to repeat
yourself (object's name). You also know if object is public at definition
site, not at some random location (where ``__all__`` was set).
Note that in multiple decorator setup (in almost all cases) ``@public``
decorator must be applied before any other decorators, because it relies
on the pointer to object's global namespace. If you apply other decorators
first, ``@public`` may end up modifying the wrong namespace.
Examples
========
>>> from sympy.utilities.decorator import public
>>> __all__ # noqa: F821
Traceback (most recent call last):
...
NameError: name '__all__' is not defined
>>> @public
... def some_function():
... pass
>>> __all__ # noqa: F821
['some_function']
"""
if isinstance(obj, types.FunctionType):
ns = obj.__globals__
name = obj.__name__
elif isinstance(obj, (type(type), type)):
ns = sys.modules[obj.__module__].__dict__
name = obj.__name__
else:
raise TypeError("expected a function or a class, got %s" % obj)
if "__all__" not in ns:
ns["__all__"] = [name]
else:
ns["__all__"].append(name)
return obj
def memoize_property(propfunc):
"""Property decorator that caches the value of potentially expensive
``propfunc`` after the first evaluation. The cached value is stored in
the corresponding property name with an attached underscore."""
attrname = '_' + propfunc.__name__
sentinel = object()
@wraps(propfunc)
def accessor(self):
val = getattr(self, attrname, sentinel)
if val is sentinel:
val = propfunc(self)
setattr(self, attrname, val)
return val
return property(accessor)
def deprecated(message, *, deprecated_since_version,
active_deprecations_target, stacklevel=3):
'''
Mark a function as deprecated.
This decorator should be used if an entire function or class is
deprecated. If only a certain functionality is deprecated, you should use
:func:`~.warns_deprecated_sympy` directly. This decorator is just a
convenience. There is no functional difference between using this
decorator and calling ``warns_deprecated_sympy()`` at the top of the
function.
The decorator takes the same arguments as
:func:`~.warns_deprecated_sympy`. See its
documentation for details on what the keywords to this decorator do.
See the :ref:`deprecation-policy` document for details on when and how
things should be deprecated in SymPy.
Examples
========
>>> from sympy.utilities.decorator import deprecated
>>> from sympy import simplify
>>> @deprecated("""\
... The simplify_this(expr) function is deprecated. Use simplify(expr)
... instead.""", deprecated_since_version="1.1",
... active_deprecations_target='simplify-this-deprecation')
... def simplify_this(expr):
... """
... Simplify ``expr``.
...
... .. deprecated:: 1.1
...
... The ``simplify_this`` function is deprecated. Use :func:`simplify`
... instead. See its documentation for more information. See
... :ref:`simplify-this-deprecation` for details.
...
... """
... return simplify(expr)
>>> from sympy.abc import x
>>> simplify_this(x*(x + 1) - x**2) # doctest: +SKIP
<stdin>:1: SymPyDeprecationWarning:
<BLANKLINE>
The simplify_this(expr) function is deprecated. Use simplify(expr)
instead.
<BLANKLINE>
See https://docs.sympy.org/latest/explanation/active-deprecations.html#simplify-this-deprecation
for details.
<BLANKLINE>
This has been deprecated since SymPy version 1.1. It
will be removed in a future version of SymPy.
<BLANKLINE>
simplify_this(x)
x
See Also
========
sympy.utilities.exceptions.SymPyDeprecationWarning
sympy.utilities.exceptions.sympy_deprecation_warning
sympy.utilities.exceptions.ignore_warnings
sympy.testing.pytest.warns_deprecated_sympy
'''
decorator_kwargs = {"deprecated_since_version": deprecated_since_version,
"active_deprecations_target": active_deprecations_target}
def deprecated_decorator(wrapped):
if hasattr(wrapped, '__mro__'): # wrapped is actually a class
class wrapper(wrapped):
__doc__ = wrapped.__doc__
__module__ = wrapped.__module__
_sympy_deprecated_func = wrapped
if '__new__' in wrapped.__dict__:
def __new__(cls, *args, **kwargs):
sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel)
return super().__new__(cls, *args, **kwargs)
else:
def __init__(self, *args, **kwargs):
sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel)
super().__init__(*args, **kwargs)
wrapper.__name__ = wrapped.__name__
else:
@wraps(wrapped)
def wrapper(*args, **kwargs):
sympy_deprecation_warning(message, **decorator_kwargs, stacklevel=stacklevel)
return wrapped(*args, **kwargs)
wrapper._sympy_deprecated_func = wrapped
return wrapper
return deprecated_decorator
| no_attrs_in_subclass |
python | pypa__setuptools | setuptools/msvc.py | {
"start": 917,
"end": 3900
} | class ____:
"""
Current and Target Architectures information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = environ.get('processor_architecture', '').lower()
def __init__(self, arch: str) -> None:
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self) -> str:
"""
Return Target CPU architecture.
Return
------
str
Target CPU
"""
return self.arch[self.arch.find('_') + 1 :]
def target_is_x86(self) -> bool:
"""
Return True if target CPU is x86 32 bits..
Return
------
bool
CPU is x86 32 bits
"""
return self.target_cpu == 'x86'
def current_is_x86(self) -> bool:
"""
Return True if current CPU is x86 32 bits..
Return
------
bool
CPU is x86 32 bits
"""
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False) -> str:
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
subfolder: '\target', or '' (see hidex86 parameter)
"""
return (
''
if (self.current_cpu == 'x86' and hidex86)
else r'\x64'
if (self.current_cpu == 'amd64' and x64)
else rf'\{self.current_cpu}'
)
def target_dir(self, hidex86=False, x64=False) -> str:
r"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
subfolder: '\current', or '' (see hidex86 parameter)
"""
return (
''
if (self.target_cpu == 'x86' and hidex86)
else r'\x64'
if (self.target_cpu == 'amd64' and x64)
else rf'\{self.target_cpu}'
)
def cross_dir(self, forcex86=False) -> str:
r"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
Use 'x86' as current architecture even if current architecture is
not x86.
Return
------
str
subfolder: '' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
''
if self.target_cpu == current
else self.target_dir().replace('\\', f'\\{current}_')
)
| PlatformInfo |
python | gevent__gevent | src/greentest/3.10/test_smtpd.py | {
"start": 35548,
"end": 37185
} | class ____(unittest.TestCase):
def setUp(self):
smtpd.socket = asyncore.socket = mock_socket
self.old_debugstream = smtpd.DEBUGSTREAM
self.debug = smtpd.DEBUGSTREAM = io.StringIO()
self.server = DummyServer((socket_helper.HOST, 0), ('b', 0),
decode_data=True)
conn, addr = self.server.accept()
# Set decode_data to True
self.channel = smtpd.SMTPChannel(self.server, conn, addr,
decode_data=True)
def tearDown(self):
asyncore.close_all()
asyncore.socket = smtpd.socket = socket
smtpd.DEBUGSTREAM = self.old_debugstream
def write_line(self, line):
self.channel.socket.queue_recv(line)
self.channel.handle_read()
def test_ascii_data(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'plain ascii text')
self.write_line(b'.')
self.assertEqual(self.channel.received_data, 'plain ascii text')
def test_utf8_data(self):
self.write_line(b'HELO example')
self.write_line(b'MAIL From:eggs@example')
self.write_line(b'RCPT To:spam@example')
self.write_line(b'DATA')
self.write_line(b'utf8 enriched text: \xc5\xbc\xc5\xba\xc4\x87')
self.write_line(b'and some plain ascii')
self.write_line(b'.')
self.assertEqual(
self.channel.received_data,
'utf8 enriched text: żźć\nand some plain ascii')
| SMTPDChannelWithDecodeDataTrue |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 15672,
"end": 17320
} | class ____(graphene.Mutation):
"""Deletes partitions from a dynamic partition set."""
Output = graphene.NonNull(GrapheneDeleteDynamicPartitionsResult)
class Arguments:
repositorySelector = graphene.NonNull(GrapheneRepositorySelector)
partitionsDefName = graphene.NonNull(graphene.String)
partitionKeys = non_null_list(graphene.String)
class Meta:
name = "DeleteDynamicPartitionsMutation"
@capture_error
@require_permission_check(Permissions.EDIT_DYNAMIC_PARTITIONS)
def mutate(
self,
graphene_info: ResolveInfo,
repositorySelector: GrapheneRepositorySelector,
partitionsDefName: str,
partitionKeys: Sequence[str],
):
return delete_dynamic_partitions(
graphene_info, repositorySelector, partitionsDefName, partitionKeys
)
async def create_execution_params_and_launch_pipeline_reexec(graphene_info, execution_params_dict):
execution_params = await create_execution_params(graphene_info, execution_params_dict)
assert_permission_for_job(
graphene_info,
Permissions.LAUNCH_PIPELINE_REEXECUTION,
JobSelector(
location_name=execution_params.selector.location_name,
repository_name=execution_params.selector.repository_name,
job_name=execution_params.selector.job_name,
),
list(execution_params.selector.entity_selection)
if execution_params.selector.entity_selection
else None,
)
return await launch_pipeline_reexecution(graphene_info, execution_params=execution_params)
| GrapheneDeleteDynamicPartitionsMutation |
python | streamlit__streamlit | lib/tests/streamlit/elements/heading_test.py | {
"start": 16211,
"end": 17370
} | class ____(DeltaGeneratorTestCase):
"""Test st.title text_alignment parameter."""
@parameterized.expand(
[
("left", 1),
("center", 2),
("right", 3),
("justify", 4),
(None, 1), # Default case
]
)
def test_st_title_text_alignment(
self, text_alignment: str | None, expected_alignment: int
):
"""Test st.title with various text_alignment values."""
if text_alignment is None:
st.title("Title text")
else:
st.title("Title text", text_alignment=text_alignment)
el = self.get_delta_from_queue().new_element
assert el.heading.body == "Title text"
assert el.heading.tag == "h1"
assert el.text_alignment_config.alignment == expected_alignment
def test_st_title_text_alignment_invalid(self):
"""Test st.title with invalid text_alignment raises error."""
with pytest.raises(StreamlitAPIException) as exc:
st.title("Title text", text_alignment="bottom")
assert 'Invalid text_alignment value: "bottom"' in str(exc.value)
| StTitleTextAlignmentTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-deepset/destination_deepset/writer.py | {
"start": 371,
"end": 452
} | class ____:
"""Raised when an error is encountered by the writer"""
| WriterError |
python | ray-project__ray | rllib/algorithms/tests/test_algorithm_config.py | {
"start": 568,
"end": 17297
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_running_specific_algo_with_generic_config(self):
"""Tests, whether some algo can be run with the generic AlgorithmConfig."""
config = (
AlgorithmConfig(algo_class=PPO)
.environment("CartPole-v0")
.training(lr=0.12345, train_batch_size=3000, minibatch_size=300)
)
algo = config.build()
self.assertTrue(algo.config.lr == 0.12345)
self.assertTrue(algo.config.train_batch_size == 3000)
algo.train()
algo.stop()
def test_freezing_of_algo_config(self):
"""Tests, whether freezing an AlgorithmConfig actually works as expected."""
config = (
AlgorithmConfig()
.environment("CartPole-v0")
.training(lr=0.12345, train_batch_size=3000)
.multi_agent(
policies={
"pol1": (None, None, None, AlgorithmConfig.overrides(lr=0.001))
},
policy_mapping_fn=lambda agent_id, episode, worker, **kw: "pol1",
)
)
config.freeze()
def set_lr(config):
config.lr = 0.01
self.assertRaisesRegex(
AttributeError,
"Cannot set attribute.+of an already frozen AlgorithmConfig",
lambda: set_lr(config),
)
# TODO: Figure out, whether we should convert all nested structures into
# frozen ones (set -> frozenset; dict -> frozendict; list -> tuple).
def set_one_policy(config):
config.policies["pol1"] = (None, None, None, {"lr": 0.123})
# self.assertRaisesRegex(
# AttributeError,
# "Cannot set attribute.+of an already frozen AlgorithmConfig",
# lambda: set_one_policy(config),
# )
def test_rollout_fragment_length(self):
"""Tests the proper auto-computation of the `rollout_fragment_length`."""
config = (
AlgorithmConfig()
.env_runners(
num_env_runners=4,
num_envs_per_env_runner=3,
rollout_fragment_length="auto",
)
.training(train_batch_size=2456)
)
# 2456 / (3 * 4) -> 204.666 -> 204 or 205 (depending on worker index).
# Actual train batch size: 2457 (off by only 1).
self.assertTrue(config.get_rollout_fragment_length(worker_index=0) == 205)
self.assertTrue(config.get_rollout_fragment_length(worker_index=1) == 205)
self.assertTrue(config.get_rollout_fragment_length(worker_index=2) == 205)
self.assertTrue(config.get_rollout_fragment_length(worker_index=3) == 205)
self.assertTrue(config.get_rollout_fragment_length(worker_index=4) == 204)
config = (
AlgorithmConfig()
.env_runners(
num_env_runners=3,
num_envs_per_env_runner=2,
rollout_fragment_length="auto",
)
.training(train_batch_size=4000)
)
# 4000 / 6 -> 666.66 -> 666 or 667 (depending on worker index)
# Actual train batch size: 4000 (perfect match)
self.assertTrue(config.get_rollout_fragment_length(worker_index=0) == 667)
self.assertTrue(config.get_rollout_fragment_length(worker_index=1) == 667)
self.assertTrue(config.get_rollout_fragment_length(worker_index=2) == 667)
self.assertTrue(config.get_rollout_fragment_length(worker_index=3) == 666)
config = (
AlgorithmConfig()
.env_runners(
num_env_runners=12,
rollout_fragment_length="auto",
)
.training(train_batch_size=1342)
)
# 1342 / 12 -> 111.83 -> 111 or 112 (depending on worker index)
# Actual train batch size: 1342 (perfect match)
for i in range(11):
self.assertTrue(config.get_rollout_fragment_length(worker_index=i) == 112)
self.assertTrue(config.get_rollout_fragment_length(worker_index=11) == 111)
self.assertTrue(config.get_rollout_fragment_length(worker_index=12) == 111)
def test_detect_atari_env(self):
"""Tests that we can properly detect Atari envs."""
config = AlgorithmConfig().environment(
env="ale_py:ALE/Breakout-v5", env_config={"frameskip": 1}
)
self.assertTrue(config.is_atari)
config = AlgorithmConfig().environment(env="ale_py:ALE/Pong-v5")
self.assertTrue(config.is_atari)
config = AlgorithmConfig().environment(env="CartPole-v1")
# We do not auto-detect callable env makers for Atari envs.
self.assertFalse(config.is_atari)
config = AlgorithmConfig().environment(
env=lambda ctx: gym.make(
"ale_py:ALE/Breakout-v5",
frameskip=1,
)
)
# We do not auto-detect callable env makers for Atari envs.
self.assertFalse(config.is_atari)
config = AlgorithmConfig().environment(env="NotAtari")
self.assertFalse(config.is_atari)
def test_rl_module_api(self):
config = PPOConfig().environment("CartPole-v1").framework("torch")
self.assertEqual(config.rl_module_spec.module_class, PPOTorchRLModule)
class A:
pass
config = config.rl_module(rl_module_spec=RLModuleSpec(A))
self.assertEqual(config.rl_module_spec.module_class, A)
def test_config_per_module(self):
"""Tests, whether per-module config overrides (multi-agent) work as expected."""
# Compile individual agents' PPO configs from a config object.
config = (
PPOConfig()
.training(kl_coeff=0.5)
.multi_agent(
policies={"module_1", "module_2", "module_3"},
# Override config settings fro `module_1` and `module_2`.
algorithm_config_overrides_per_module={
"module_1": PPOConfig.overrides(lr=0.01, kl_coeff=0.1),
"module_2": PPOConfig.overrides(grad_clip=100.0),
},
)
)
# Check default config.
check(config.lr, 0.00005)
check(config.grad_clip, None)
check(config.grad_clip_by, "global_norm")
check(config.kl_coeff, 0.5)
# `module_1` overrides.
config_1 = config.get_config_for_module("module_1")
check(config_1.lr, 0.01)
check(config_1.grad_clip, None)
check(config_1.grad_clip_by, "global_norm")
check(config_1.kl_coeff, 0.1)
# `module_2` overrides.
config_2 = config.get_config_for_module("module_2")
check(config_2.lr, 0.00005)
check(config_2.grad_clip, 100.0)
check(config_2.grad_clip_by, "global_norm")
check(config_2.kl_coeff, 0.5)
# No `module_3` overrides (b/c module_3 uses the top-level config
# object directly).
self.assertTrue("module_3" not in config._per_module_overrides)
config_3 = config.get_config_for_module("module_3")
self.assertTrue(config_3 is config)
def test_learner_api(self):
config = PPOConfig().environment("CartPole-v1")
self.assertEqual(config.learner_class, PPOTorchLearner)
def _assertEqualMARLSpecs(self, spec1, spec2):
self.assertEqual(spec1.multi_rl_module_class, spec2.multi_rl_module_class)
self.assertEqual(set(spec1.module_specs.keys()), set(spec2.module_specs.keys()))
for k, module_spec1 in spec1.module_specs.items():
module_spec2 = spec2.module_specs[k]
self.assertEqual(module_spec1.module_class, module_spec2.module_class)
self.assertEqual(
module_spec1.observation_space, module_spec2.observation_space
)
self.assertEqual(module_spec1.action_space, module_spec2.action_space)
self.assertEqual(
module_spec1.model_config_dict, module_spec2.model_config_dict
)
def _get_expected_marl_spec(
self,
config: AlgorithmConfig,
expected_module_class: Type[RLModule],
passed_module_class: Type[RLModule] = None,
expected_multi_rl_module_class: Type[MultiRLModule] = None,
):
"""This is a utility function that retrieves the expected marl specs.
Args:
config: The algorithm config.
expected_module_class: This is the expected RLModule class that is going to
be reference in the RLModuleSpec parts of the MultiLModuleSpec.
passed_module_class: This is the RLModule class that is passed into the
module_spec argument of get_multi_rl_module_spec. The function is
designed so that it will use the passed in module_spec for the
RLModuleSpec parts of the MultiRLModuleSpec.
expected_multi_rl_module_class: This is the expected MultiRLModule class
that is going to be reference in the MultiRLModuleSpec.
Returns:
Tuple of the returned MultiRLModuleSpec from config.
get_multi_rl_module_spec() and the expected MultiRLModuleSpec.
"""
from ray.rllib.policy.policy import PolicySpec
if expected_multi_rl_module_class is None:
expected_multi_rl_module_class = MultiRLModule
env = gym.make("CartPole-v1")
policy_spec_ph = PolicySpec(
observation_space=env.observation_space,
action_space=env.action_space,
config=AlgorithmConfig(),
)
marl_spec = config.get_multi_rl_module_spec(
policy_dict={"p1": policy_spec_ph, "p2": policy_spec_ph},
single_agent_rl_module_spec=RLModuleSpec(module_class=passed_module_class)
if passed_module_class
else None,
)
expected_marl_spec = MultiRLModuleSpec(
multi_rl_module_class=expected_multi_rl_module_class,
rl_module_specs={
"p1": RLModuleSpec(
module_class=expected_module_class,
observation_space=env.observation_space,
action_space=env.action_space,
),
"p2": RLModuleSpec(
module_class=expected_module_class,
observation_space=env.observation_space,
action_space=env.action_space,
),
},
)
return marl_spec, expected_marl_spec
def test_get_multi_rl_module_spec(self):
"""Tests whether the get_multi_rl_module_spec() method works properly."""
from ray.rllib.examples.rl_modules.classes.vpg_torch_rlm import VPGTorchRLModule
class CustomRLModule1(VPGTorchRLModule):
pass
class CustomRLModule2(VPGTorchRLModule):
pass
class CustomRLModule3(VPGTorchRLModule):
pass
class CustomMultiRLModule1(MultiRLModule):
pass
########################################
# single agent
class SingleAgentAlgoConfig(AlgorithmConfig):
def get_default_rl_module_spec(self):
return RLModuleSpec(module_class=VPGTorchRLModule)
# multi-agent
class MultiAgentAlgoConfigWithNoSingleAgentSpec(AlgorithmConfig):
def get_default_rl_module_spec(self):
return MultiRLModuleSpec(multi_rl_module_class=CustomMultiRLModule1)
########################################
# This is the simplest case where we have to construct the MultiRLModule based
# on the default specs only.
config = SingleAgentAlgoConfig().api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
spec, expected = self._get_expected_marl_spec(config, VPGTorchRLModule)
self._assertEqualMARLSpecs(spec, expected)
# expected module should become the passed module if we pass it in.
spec, expected = self._get_expected_marl_spec(
config, CustomRLModule2, passed_module_class=CustomRLModule2
)
self._assertEqualMARLSpecs(spec, expected)
########################################
# This is the case where we pass in a `MultiRLModuleSpec` that asks the
# algorithm to assign a specific type of RLModule class to certain module_ids.
config = (
SingleAgentAlgoConfig()
.api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
.rl_module(
rl_module_spec=MultiRLModuleSpec(
rl_module_specs={
"p1": RLModuleSpec(module_class=CustomRLModule1),
"p2": RLModuleSpec(module_class=CustomRLModule1),
},
),
)
)
spec, expected = self._get_expected_marl_spec(config, CustomRLModule1)
self._assertEqualMARLSpecs(spec, expected)
########################################
# This is the case where we ask the algorithm to assign a specific type of
# RLModule class to ALL module_ids.
config = (
SingleAgentAlgoConfig()
.api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
.rl_module(
rl_module_spec=RLModuleSpec(module_class=CustomRLModule1),
)
)
spec, expected = self._get_expected_marl_spec(config, CustomRLModule1)
self._assertEqualMARLSpecs(spec, expected)
# expected module should become the passed module if we pass it in.
spec, expected = self._get_expected_marl_spec(
config, CustomRLModule2, passed_module_class=CustomRLModule2
)
self._assertEqualMARLSpecs(spec, expected)
########################################
# This is not only assigning a specific type of RLModule class to EACH
# module_id, but also defining a new custom MultiRLModule class to be used
# in the multi-agent scenario.
config = (
SingleAgentAlgoConfig()
.api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
.rl_module(
rl_module_spec=MultiRLModuleSpec(
multi_rl_module_class=CustomMultiRLModule1,
rl_module_specs={
"p1": RLModuleSpec(module_class=CustomRLModule1),
"p2": RLModuleSpec(module_class=CustomRLModule1),
},
),
)
)
spec, expected = self._get_expected_marl_spec(
config, CustomRLModule1, expected_multi_rl_module_class=CustomMultiRLModule1
)
self._assertEqualMARLSpecs(spec, expected)
# This is expected to return CustomRLModule1 instead of CustomRLModule3 which
# is passed in. Because the default for p1, p2 is to use CustomRLModule1. The
# passed module_spec only sets a default to fall back onto in case the
# module_id is not specified in the original MultiRLModuleSpec. Since P1
# and P2 are both assigned to CustomeRLModule1, the passed module_spec will not
# be used. This is the expected behavior for adding a new modules to a
# `MultiRLModule` that is not defined in the original MultiRLModuleSpec.
spec, expected = self._get_expected_marl_spec(
config,
CustomRLModule1,
passed_module_class=CustomRLModule3,
expected_multi_rl_module_class=CustomMultiRLModule1,
)
self._assertEqualMARLSpecs(spec, expected)
########################################
# This is the case where we ask the algorithm to use its default
# MultiRLModuleSpec, but the MultiRLModuleSpec has not defined its
# RLModuleSpecs.
config = MultiAgentAlgoConfigWithNoSingleAgentSpec().api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
self.assertRaisesRegex(
ValueError,
"Module_specs cannot be None",
lambda: config.rl_module_spec,
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestAlgorithmConfig |
python | django__django | tests/composite_pk/test_get.py | {
"start": 91,
"end": 5049
} | class ____(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tenant_1 = Tenant.objects.create()
cls.tenant_2 = Tenant.objects.create()
cls.user_1 = User.objects.create(
tenant=cls.tenant_1,
id=1,
email="user0001@example.com",
)
cls.user_2 = User.objects.create(
tenant=cls.tenant_1,
id=2,
email="user0002@example.com",
)
cls.user_3 = User.objects.create(
tenant=cls.tenant_2,
id=3,
email="user0003@example.com",
)
cls.comment_1 = Comment.objects.create(id=1, user=cls.user_1)
def test_get_user(self):
test_cases = (
{"pk": self.user_1.pk},
{"pk": (self.tenant_1.id, self.user_1.id)},
{"id": self.user_1.id},
)
for lookup in test_cases:
with self.subTest(lookup=lookup):
self.assertEqual(User.objects.get(**lookup), self.user_1)
def test_get_comment(self):
test_cases = (
{"pk": self.comment_1.pk},
{"pk": (self.tenant_1.id, self.comment_1.id)},
{"id": self.comment_1.id},
{"user": self.user_1},
{"user_id": self.user_1.id},
{"user__id": self.user_1.id},
{"user__pk": self.user_1.pk},
{"tenant": self.tenant_1},
{"tenant_id": self.tenant_1.id},
{"tenant__id": self.tenant_1.id},
{"tenant__pk": self.tenant_1.pk},
)
for lookup in test_cases:
with self.subTest(lookup=lookup):
self.assertEqual(Comment.objects.get(**lookup), self.comment_1)
def test_get_or_create_user(self):
test_cases = (
{
"pk": self.user_1.pk,
"defaults": {"email": "user9201@example.com"},
},
{
"pk": (self.tenant_1.id, self.user_1.id),
"defaults": {"email": "user9201@example.com"},
},
{
"tenant": self.tenant_1,
"id": self.user_1.id,
"defaults": {"email": "user3512@example.com"},
},
{
"tenant_id": self.tenant_1.id,
"id": self.user_1.id,
"defaults": {"email": "user8239@example.com"},
},
)
for fields in test_cases:
with self.subTest(fields=fields):
count = User.objects.count()
user, created = User.objects.get_or_create(**fields)
self.assertIs(created, False)
self.assertEqual(user.id, self.user_1.id)
self.assertEqual(user.pk, (self.tenant_1.id, self.user_1.id))
self.assertEqual(user.tenant_id, self.tenant_1.id)
self.assertEqual(user.email, self.user_1.email)
self.assertEqual(count, User.objects.count())
def test_lookup_errors(self):
m_tuple = "'%s' lookup of 'pk' must be a tuple or a list"
m_2_elements = "'%s' lookup of 'pk' must have 2 elements"
m_tuple_collection = (
"'in' lookup of 'pk' must be a collection of tuples or lists"
)
m_2_elements_each = "'in' lookup of 'pk' must have 2 elements each"
test_cases = (
({"pk": 1}, m_tuple % "exact"),
({"pk": (1, 2, 3)}, m_2_elements % "exact"),
({"pk__exact": 1}, m_tuple % "exact"),
({"pk__exact": (1, 2, 3)}, m_2_elements % "exact"),
({"pk__in": 1}, m_tuple % "in"),
({"pk__in": (1, 2, 3)}, m_tuple_collection),
({"pk__in": ((1, 2, 3),)}, m_2_elements_each),
({"pk__gt": 1}, m_tuple % "gt"),
({"pk__gt": (1, 2, 3)}, m_2_elements % "gt"),
({"pk__gte": 1}, m_tuple % "gte"),
({"pk__gte": (1, 2, 3)}, m_2_elements % "gte"),
({"pk__lt": 1}, m_tuple % "lt"),
({"pk__lt": (1, 2, 3)}, m_2_elements % "lt"),
({"pk__lte": 1}, m_tuple % "lte"),
({"pk__lte": (1, 2, 3)}, m_2_elements % "lte"),
)
for kwargs, message in test_cases:
with (
self.subTest(kwargs=kwargs),
self.assertRaisesMessage(ValueError, message),
):
Comment.objects.get(**kwargs)
def test_get_user_by_comments(self):
self.assertEqual(User.objects.get(comments=self.comment_1), self.user_1)
def test_get_previous_by_field(self):
stamp_1 = TimeStamped.objects.create(id=1)
stamp_2 = TimeStamped(id=2)
msg = "get_next/get_previous cannot be used on unsaved objects."
with self.assertRaisesMessage(ValueError, msg):
stamp_2.get_previous_by_created()
stamp_2.save()
self.assertEqual(stamp_2.get_previous_by_created(), stamp_1)
| CompositePKGetTests |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 4108,
"end": 20639
} | class ____:
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = "django.contrib.auth.backends.ModelBackend"
@classmethod
def setUpClass(cls):
cls.enterClassContext(
modify_settings(AUTHENTICATION_BACKENDS={"append": cls.backend})
)
super().setUpClass()
def setUp(self):
# The custom_perms test messes with ContentTypes, which will be cached.
# Flush the cache to ensure there are no side effects.
self.addCleanup(ContentType.objects.clear_cache)
self.create_users()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertIs(user.has_perm("auth.test"), False)
user.is_staff = True
user.save()
self.assertIs(user.has_perm("auth.test"), False)
user.is_superuser = True
user.save()
self.assertIs(user.has_perm("auth.test"), True)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertIs(user.has_perm("auth.test"), False)
async def test_ahas_perm(self):
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
self.assertIs(await user.ahas_perm("auth.test"), False)
user.is_staff = True
await user.asave()
self.assertIs(await user.ahas_perm("auth.test"), False)
user.is_superuser = True
await user.asave()
self.assertIs(await user.ahas_perm("auth.test"), True)
self.assertIs(await user.ahas_module_perms("auth"), True)
user.is_staff = True
user.is_superuser = True
user.is_active = False
await user.asave()
self.assertIs(await user.ahas_perm("auth.test"), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(
name="test", content_type=content_type, codename="test"
)
user.user_permissions.add(perm)
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), {"auth.test"})
self.assertEqual(user.get_user_permissions(), {"auth.test"})
self.assertEqual(user.get_group_permissions(), set())
self.assertIs(user.has_module_perms("Group"), False)
self.assertIs(user.has_module_perms("auth"), True)
perm = Permission.objects.create(
name="test2", content_type=content_type, codename="test2"
)
user.user_permissions.add(perm)
perm = Permission.objects.create(
name="test3", content_type=content_type, codename="test3"
)
user.user_permissions.add(perm)
user = self.UserModel._default_manager.get(pk=self.user.pk)
expected_user_perms = {"auth.test2", "auth.test", "auth.test3"}
self.assertEqual(user.get_all_permissions(), expected_user_perms)
self.assertIs(user.has_perm("test"), False)
self.assertIs(user.has_perm("auth.test"), True)
self.assertIs(user.has_perms(["auth.test2", "auth.test3"]), True)
perm = Permission.objects.create(
name="test_group", content_type=content_type, codename="test_group"
)
group = Group.objects.create(name="test_group")
group.permissions.add(perm)
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(
user.get_all_permissions(), {*expected_user_perms, "auth.test_group"}
)
self.assertEqual(user.get_user_permissions(), expected_user_perms)
self.assertEqual(user.get_group_permissions(), {"auth.test_group"})
self.assertIs(user.has_perms(["auth.test3", "auth.test_group"]), True)
user = AnonymousUser()
self.assertIs(user.has_perm("test"), False)
self.assertIs(user.has_perms(["auth.test2", "auth.test3"]), False)
async def test_acustom_perms(self):
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test"
)
await user.user_permissions.aadd(perm)
# Reloading user to purge the _perm_cache.
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
self.assertEqual(await user.aget_all_permissions(), {"auth.test"})
self.assertEqual(await user.aget_user_permissions(), {"auth.test"})
self.assertEqual(await user.aget_group_permissions(), set())
self.assertIs(await user.ahas_module_perms("Group"), False)
self.assertIs(await user.ahas_module_perms("auth"), True)
perm = await Permission.objects.acreate(
name="test2", content_type=content_type, codename="test2"
)
await user.user_permissions.aadd(perm)
perm = await Permission.objects.acreate(
name="test3", content_type=content_type, codename="test3"
)
await user.user_permissions.aadd(perm)
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
expected_user_perms = {"auth.test2", "auth.test", "auth.test3"}
self.assertEqual(await user.aget_all_permissions(), expected_user_perms)
self.assertIs(await user.ahas_perm("test"), False)
self.assertIs(await user.ahas_perm("auth.test"), True)
self.assertIs(await user.ahas_perms(["auth.test2", "auth.test3"]), True)
perm = await Permission.objects.acreate(
name="test_group", content_type=content_type, codename="test_group"
)
group = await Group.objects.acreate(name="test_group")
await group.permissions.aadd(perm)
await user.groups.aadd(group)
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
self.assertEqual(
await user.aget_all_permissions(), {*expected_user_perms, "auth.test_group"}
)
self.assertEqual(await user.aget_user_permissions(), expected_user_perms)
self.assertEqual(await user.aget_group_permissions(), {"auth.test_group"})
self.assertIs(await user.ahas_perms(["auth.test3", "auth.test_group"]), True)
user = AnonymousUser()
self.assertIs(await user.ahas_perm("test"), False)
self.assertIs(await user.ahas_perms(["auth.test2", "auth.test3"]), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(
name="test", content_type=content_type, codename="test"
)
user.user_permissions.add(perm)
self.assertIs(user.has_perm("auth.test", "object"), False)
self.assertEqual(user.get_all_permissions("object"), set())
self.assertIs(user.has_perm("auth.test"), True)
self.assertEqual(user.get_all_permissions(), {"auth.test"})
async def test_ahas_no_object_perm(self):
"""See test_has_no_object_perm()"""
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test"
)
await user.user_permissions.aadd(perm)
self.assertIs(await user.ahas_perm("auth.test", "object"), False)
self.assertEqual(await user.aget_all_permissions("object"), set())
self.assertIs(await user.ahas_perm("auth.test"), True)
self.assertEqual(await user.aget_all_permissions(), {"auth.test"})
def test_anonymous_has_no_permissions(self):
"""
#17903 -- Anonymous users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(
name="test", content_type=content_type, codename="test_user"
)
group_perm = Permission.objects.create(
name="test2", content_type=content_type, codename="test_group"
)
user.user_permissions.add(user_perm)
group = Group.objects.create(name="test_group")
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(
backend.get_all_permissions(user), {"auth.test_user", "auth.test_group"}
)
self.assertEqual(backend.get_user_permissions(user), {"auth.test_user"})
self.assertEqual(backend.get_group_permissions(user), {"auth.test_group"})
with mock.patch.object(self.UserModel, "is_anonymous", True):
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
async def test_aanonymous_has_no_permissions(self):
"""See test_anonymous_has_no_permissions()"""
backend = ModelBackend()
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
user_perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test_user"
)
group_perm = await Permission.objects.acreate(
name="test2", content_type=content_type, codename="test_group"
)
await user.user_permissions.aadd(user_perm)
group = await Group.objects.acreate(name="test_group")
await user.groups.aadd(group)
await group.permissions.aadd(group_perm)
self.assertEqual(
await backend.aget_all_permissions(user),
{"auth.test_user", "auth.test_group"},
)
self.assertEqual(await backend.aget_user_permissions(user), {"auth.test_user"})
self.assertEqual(
await backend.aget_group_permissions(user), {"auth.test_group"}
)
with mock.patch.object(self.UserModel, "is_anonymous", True):
self.assertEqual(await backend.aget_all_permissions(user), set())
self.assertEqual(await backend.aget_user_permissions(user), set())
self.assertEqual(await backend.aget_group_permissions(user), set())
def test_inactive_has_no_permissions(self):
"""
#17903 -- Inactive users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(
name="test", content_type=content_type, codename="test_user"
)
group_perm = Permission.objects.create(
name="test2", content_type=content_type, codename="test_group"
)
user.user_permissions.add(user_perm)
group = Group.objects.create(name="test_group")
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(
backend.get_all_permissions(user), {"auth.test_user", "auth.test_group"}
)
self.assertEqual(backend.get_user_permissions(user), {"auth.test_user"})
self.assertEqual(backend.get_group_permissions(user), {"auth.test_group"})
user.is_active = False
user.save()
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
async def test_ainactive_has_no_permissions(self):
"""See test_inactive_has_no_permissions()"""
backend = ModelBackend()
user = await self.UserModel._default_manager.aget(pk=self.user.pk)
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
user_perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test_user"
)
group_perm = await Permission.objects.acreate(
name="test2", content_type=content_type, codename="test_group"
)
await user.user_permissions.aadd(user_perm)
group = await Group.objects.acreate(name="test_group")
await user.groups.aadd(group)
await group.permissions.aadd(group_perm)
self.assertEqual(
await backend.aget_all_permissions(user),
{"auth.test_user", "auth.test_group"},
)
self.assertEqual(await backend.aget_user_permissions(user), {"auth.test_user"})
self.assertEqual(
await backend.aget_group_permissions(user), {"auth.test_group"}
)
user.is_active = False
await user.asave()
self.assertEqual(await backend.aget_all_permissions(user), set())
self.assertEqual(await backend.aget_user_permissions(user), set())
self.assertEqual(await backend.aget_group_permissions(user), set())
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
async def test_aget_all_superuser_permissions(self):
"""See test_get_all_superuser_permissions()"""
user = await self.UserModel._default_manager.aget(pk=self.superuser.pk)
self.assertEqual(
len(await user.aget_all_permissions()), await Permission.objects.acount()
)
@override_settings(
PASSWORD_HASHERS=["auth_tests.test_auth_backends.CountingMD5PasswordHasher"]
)
def test_authentication_timing(self):
"""
Hasher is run once regardless of whether the user exists. Refs #20760.
"""
# Re-set the password, because this tests overrides PASSWORD_HASHERS
self.user.set_password("test")
self.user.save()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
authenticate(username=username, password="test")
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
authenticate(username="no_such_user", password="test")
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
@override_settings(
PASSWORD_HASHERS=["auth_tests.test_auth_backends.CountingMD5PasswordHasher"]
)
async def test_aauthentication_timing(self):
"""See test_authentication_timing()"""
# Re-set the password, because this tests overrides PASSWORD_HASHERS.
self.user.set_password("test")
await self.user.asave()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
await aauthenticate(username=username, password="test")
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
await aauthenticate(username="no_such_user", password="test")
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
@override_settings(
PASSWORD_HASHERS=["auth_tests.test_auth_backends.CountingMD5PasswordHasher"]
)
def test_authentication_without_credentials(self):
CountingMD5PasswordHasher.calls = 0
for credentials in (
{},
{"username": getattr(self.user, self.UserModel.USERNAME_FIELD)},
{"password": "test"},
):
with self.subTest(credentials=credentials):
with self.assertNumQueries(0):
authenticate(**credentials)
self.assertEqual(CountingMD5PasswordHasher.calls, 0)
| BaseModelBackendTest |
python | getsentry__sentry | src/sentry/release_health/base.py | {
"start": 5577,
"end": 5762
} | class ____(TypedDict):
sessions: int
sessions_healthy: int
sessions_crashed: int
sessions_abnormal: int
sessions_unhandled: int
sessions_errored: int
| SessionCounts |
python | walkccc__LeetCode | solutions/658. Find K Closest Elements/658.py | {
"start": 0,
"end": 274
} | class ____:
def findClosestElements(self, arr: list[int], k: int, x: int) -> list[int]:
l = 0
r = len(arr) - k
while l < r:
m = (l + r) // 2
if x - arr[m] <= arr[m + k] - x:
r = m
else:
l = m + 1
return arr[l:l + k]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/sum-of-squares-of-special-elements.py | {
"start": 51,
"end": 447
} | class ____(object):
def sumOfSquares(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
for i in xrange(1, int(len(nums)**0.5)+1):
if len(nums)%i:
continue
result += nums[i-1]**2
if len(nums)//i != i:
result += nums[len(nums)//i-1]**2
return result
| Solution |
python | rapidsai__cudf | python/cudf/cudf/core/udf/groupby_typing.py | {
"start": 6800,
"end": 8523
} | class ____(AbstractTemplate):
def make_error_string(self, args):
fname = self.key.split(".")[-1]
args = (self.this, *args)
dtype_err = ", ".join([str(g.group_scalar_type) for g in args])
sr_err = ", ".join(["Series" for _ in range(len(args) - 1)])
return (
f"Series.{fname}({sr_err}) is not supported for "
f"({dtype_err}) within JIT GroupBy apply. To see "
f"what's available, visit {_UDF_DOC_URL}"
)
def generic(self, args, kws):
# earlystop to make sure typing doesn't fail for normal
# non-group ops
if not all(isinstance(arg, GroupType) for arg in args):
return None
# check if any groups are poisioned for this op
for arg in (self.this, *args):
if isinstance(arg.group_scalar_type, types.Poison):
raise UDFError(
f"Use of a column of {arg.group_scalar_type.ty} detected "
"within UDAF body. Only columns of the following dtypes "
"may be used through the GroupBy.apply() JIT engine: "
f"{[str(x) for x in SUPPORTED_GROUPBY_NUMPY_TYPES]}"
)
fname = self.key.split(".")[-1]
if funcs := call_cuda_functions.get(fname):
for sig in funcs.keys():
retty, selfty, *argtys = sig
if self.this.group_scalar_type == selfty and all(
arg.group_scalar_type == ty
for arg, ty in zip(args, argtys, strict=True)
):
return nb_signature(retty, *args, recvr=self.this)
raise UDFError(self.make_error_string(args))
| GroupAttrBase |
python | getsentry__sentry | src/sentry/models/group.py | {
"start": 10378,
"end": 21522
} | class ____(BaseManager["Group"]):
use_for_related_fields = True
def get_queryset(self):
return (
super()
.get_queryset()
.with_post_update_signal(options.get("groups.enable-post-update-signal"))
)
def by_qualified_short_id(self, organization_id: int, short_id: str):
return self.by_qualified_short_id_bulk(organization_id, [short_id])[0]
def by_qualified_short_id_bulk(
self, organization_id: int, short_ids_raw: list[str]
) -> Sequence[Group]:
short_ids = []
for short_id_raw in short_ids_raw:
parsed_short_id = parse_short_id(short_id_raw)
if parsed_short_id is None:
raise Group.DoesNotExist()
short_ids.append(parsed_short_id)
if not short_ids:
raise Group.DoesNotExist()
project_short_id_lookup = defaultdict(list)
for short_id in short_ids:
project_short_id_lookup[short_id.project_slug].append(short_id.short_id)
short_id_lookup = reduce(
or_,
[
Q(project__slug=slug, short_id__in=short_ids)
for slug, short_ids in project_short_id_lookup.items()
],
)
base_group_queryset = self.exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
GroupStatus.PENDING_MERGE,
]
).filter(project__organization=organization_id)
groups = list(base_group_queryset.filter(short_id_lookup))
group_lookup: set[int] = {group.short_id for group in groups}
# If any requested short_ids are missing after the exact slug match,
# fallback to a case-insensitive slug lookup to handle legacy/mixed-case slugs.
# Handles legacy project slugs that may not be entirely lowercase.
missing_by_slug = defaultdict(list)
for sid in short_ids:
if sid.short_id not in group_lookup:
missing_by_slug[sid.project_slug].append(sid.short_id)
if len(missing_by_slug) > 0:
ci_short_id_lookup = reduce(
or_,
[
Q(project__slug__iexact=slug, short_id__in=sids)
for slug, sids in missing_by_slug.items()
],
)
fallback_groups = list(base_group_queryset.filter(ci_short_id_lookup))
groups.extend(fallback_groups)
group_lookup.update(group.short_id for group in fallback_groups)
for short_id in short_ids:
if short_id.short_id not in group_lookup:
raise Group.DoesNotExist()
return groups
def from_event_id(self, project, event_id):
"""Resolves the 32 character event_id string into a Group for which it is found."""
group_id = None
event = eventstore.backend.get_event_by_id(project.id, event_id)
if event:
group_id = event.group_id
if group_id is None:
# Raise a Group.DoesNotExist here since it makes
# more logical sense since this is intending to resolve
# a Group.
raise Group.DoesNotExist()
return self.get(id=group_id)
def filter_by_event_id(self, project_ids, event_id, tenant_ids=None):
events = eventstore.backend.get_events(
filter=eventstore.Filter(
event_ids=[event_id],
project_ids=project_ids,
conditions=[["group_id", "IS NOT NULL", None]],
),
limit=max(len(project_ids), 100),
referrer="Group.filter_by_event_id",
tenant_ids=tenant_ids,
)
return self.filter(id__in={event.group_id for event in events})
def get_groups_by_external_issue(
self,
integration: Integration | RpcIntegration,
organizations: Iterable[Organization],
external_issue_key: str | None,
) -> QuerySet[Group]:
from sentry.integrations.models.external_issue import ExternalIssue
from sentry.integrations.services.integration import integration_service
from sentry.models.grouplink import GroupLink
external_issue_subquery = ExternalIssue.objects.get_for_integration(
integration, external_issue_key
).values_list("id", flat=True)
group_link_subquery = GroupLink.objects.filter(
linked_id__in=external_issue_subquery
).values_list("group_id", flat=True)
org_ids_with_integration = list(
i.organization_id
for i in integration_service.get_organization_integrations(
organization_ids=[o.id for o in organizations],
integration_id=integration.id,
)
)
return self.filter(
id__in=group_link_subquery,
project__organization_id__in=org_ids_with_integration,
).select_related("project")
def update_group_status(
self,
groups: Iterable[Group],
status: int,
substatus: int | None,
activity_type: ActivityType,
activity_data: Mapping[str, Any] | None = None,
send_activity_notification: bool = True,
from_substatus: int | None = None,
detector_id: int | None = None,
update_date: datetime | None = None,
) -> None:
"""For each groups, update status to `status` and create an Activity."""
from sentry.incidents.grouptype import MetricIssue
from sentry.models.activity import Activity
from sentry.models.groupopenperiod import update_group_open_period
from sentry.workflow_engine.models.incident_groupopenperiod import (
update_incident_based_on_open_period_status_change,
)
modified_groups_list = []
selected_groups = Group.objects.filter(id__in=[g.id for g in groups]).exclude(
status=status, substatus=substatus
)
should_update_priority = (
from_substatus == GroupSubStatus.ESCALATING
and activity_type == ActivityType.AUTO_SET_ONGOING
)
# Track the resolved groups that are being unresolved and need to have their open period reopened
should_reopen_open_period = {
group.id: group.status == GroupStatus.RESOLVED for group in selected_groups
}
resolved_at = update_date if update_date is not None else timezone.now()
updated_priority = {}
for group in selected_groups:
group.status = status
group.substatus = substatus
if status == GroupStatus.RESOLVED:
group.resolved_at = resolved_at
if should_update_priority:
priority = get_priority_for_ongoing_group(group)
if priority and group.priority != priority:
group.priority = priority
updated_priority[group.id] = priority
modified_groups_list.append(group)
Group.objects.bulk_update(
modified_groups_list, ["status", "substatus", "priority", "resolved_at"]
)
for group in modified_groups_list:
activity = Activity.objects.create_group_activity(
group,
activity_type,
data=activity_data,
send_notification=send_activity_notification,
datetime=update_date,
)
record_group_history_from_activity_type(group, activity_type.value)
if group.id in updated_priority:
new_priority = updated_priority[group.id]
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_PRIORITY,
data={
"priority": new_priority.to_str(),
"reason": PriorityChangeReason.ONGOING,
},
datetime=update_date,
)
record_group_history(group, PRIORITY_TO_GROUP_HISTORY_STATUS[new_priority])
is_status_resolved = status == GroupStatus.RESOLVED
is_status_unresolved = status == GroupStatus.UNRESOLVED
# The open period is only updated when a group is resolved or reopened. We don't want to
# update the open period when a group transitions between different substatuses within UNRESOLVED.
if is_status_resolved:
update_group_open_period(
group=group,
new_status=GroupStatus.RESOLVED,
resolution_time=activity.datetime,
resolution_activity=activity,
)
elif is_status_unresolved and should_reopen_open_period[group.id]:
update_group_open_period(
group=group,
new_status=GroupStatus.UNRESOLVED,
)
should_update_incident = is_status_resolved or (
is_status_unresolved and should_reopen_open_period[group.id]
)
# TODO (aci cleanup): remove this once we've deprecated the incident model
if group.type == MetricIssue.type_id and should_update_incident:
if detector_id is None:
logger.error(
"Call to update metric issue status missing detector ID",
extra={"group_id": group.id},
)
continue
update_incident_based_on_open_period_status_change(group, status)
def from_share_id(self, share_id: str) -> Group:
if not share_id or len(share_id) != 32:
raise Group.DoesNotExist
from sentry.models.groupshare import GroupShare
return self.get(id__in=GroupShare.objects.filter(uuid=share_id).values_list("group_id")[:1])
def filter_to_team(self, team):
from sentry.models.groupassignee import GroupAssignee
from sentry.models.project import Project
project_list = Project.objects.get_for_team_ids(team_ids=[team.id])
user_ids = list(team.member_set.values_list("user_id", flat=True))
assigned_groups = (
GroupAssignee.objects.filter(team=team)
.union(GroupAssignee.objects.filter(user_id__in=user_ids))
.values_list("group_id", flat=True)
)
return self.filter(
project__in=project_list,
id__in=assigned_groups,
)
def get_issues_mapping(
self,
group_ids: Iterable[int],
project_ids: Sequence[int],
organization: Organization,
) -> Mapping[int, str | None]:
"""Create a dictionary of group_ids to their qualified_short_ids."""
return {
i.id: i.qualified_short_id
for i in self.filter(
id__in=group_ids,
project_id__in=project_ids,
project__organization=organization,
)
}
@region_silo_model
| GroupManager |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 3885,
"end": 3976
} | class ____(models.Model):
role = models.ForeignKey(Role, on_delete=models.CASCADE)
| Person |
python | pytorch__pytorch | torch/distributed/_functional_collectives.py | {
"start": 23452,
"end": 32116
} | class ____(torch.Tensor):
r"""
A Tensor wrapper subclass that is used to trigger a call to wait
prior to first use of the underlying tensor.
Use it inside functional collective pytorch wrappers like the following:
def functional_collective(self, group, tag):
tag, rankset, group_size = _expand_group(group, tag)
tensor = torch.ops.c10d_functional.{collective}(self, tag, rankset, group_size)
return _maybe_wrap_tensor(tensor)
"""
elem: torch.Tensor
completed: bool
__slots__ = ["elem", "completed"]
@staticmethod
def __new__(cls, elem: torch.Tensor):
r = torch.Tensor._make_wrapper_subclass(
cls,
elem.size(),
strides=elem.stride(),
storage_offset=elem.storage_offset(),
dtype=elem.dtype,
layout=elem.layout,
device=elem.device,
requires_grad=elem.requires_grad,
)
r.elem = elem
r.completed = False
return r
def __tensor_flatten__(self):
return ["elem"], None
def tolist(self):
return self.trigger_wait().tolist()
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
if meta is not None:
raise AssertionError(
"meta must be None for AsyncCollectiveTensor unflatten"
)
elem = inner_tensors["elem"]
return AsyncCollectiveTensor(elem)
def __coerce_same_metadata_as_tangent__(
self, expected_metadata: Any, expected_type: Optional[type] = None
):
if expected_type is not torch.Tensor:
return None
return self.trigger_wait()
def __repr__(self) -> str: # type: ignore[override]
return f"AsyncCollectiveTensor({self.trigger_wait()})"
def trigger_wait(self):
if not self.completed:
out = wait_tensor(self.elem)
self.completed = True
return out
else:
return self.elem
def wait(self) -> torch.Tensor:
return wait_tensor(self.elem)
def _get_acs_underlying_tensor(self):
"""This method enables _functional_collectives_impl to test if a tensor is an ACS"""
return self.elem
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None): # type: ignore[override]
if func is torch.ops.aten.view.default:
# Fast handle aten.view as a lot of view related op goes to aten.view
# eventually, this avoids pytree slowdown
# pyrefly: ignore [index-error]
res = func(args[0].elem, args[1])
wrapper_res = AsyncCollectiveTensor(res)
return wrapper_res
is_view_op = _is_view_op(func)
def unwrap(e: AsyncCollectiveTensor):
# wait_tensor is idepotent and will do stream sync only once
if not is_view_op:
return e.trigger_wait()
return e.elem
def wrap(e: torch.Tensor):
# wait_tensor is idepotent and will do stream sync only once
if isinstance(e, AsyncCollectiveTensor):
raise AssertionError(
"Cannot wrap an AsyncCollectiveTensor inside another AsyncCollectiveTensor"
)
res = AsyncCollectiveTensor(e)
return res
unwrapped_args = tree_map_only(AsyncCollectiveTensor, unwrap, args)
unwrapped_kwargs = tree_map_only(AsyncCollectiveTensor, unwrap, kwargs)
# we don't wrap the result as it doesn't need to be waited on.
out = func(*unwrapped_args, **unwrapped_kwargs)
# View ops dont require a sync, so we should re-wrap the outputs.
if is_view_op:
out = tree_map_only(torch.Tensor, wrap, out)
return out
def numpy(self): # type: ignore[override]
return self.wait().numpy()
"""
Utils and infrastructure for tracing support
"""
def _expand_group(group: RANK_TYPES, tag: str = "") -> tuple[str, list[int], int]:
"""
_expand_group desugars the different RANK_TYPES types into a canonical format that is traceable.
By having this be part of the explicit eager codepath, we avoid having to specialize behavior inside
torchdynamo and can still interoperate with processgroup objects or other untraceable forms.
"""
# had to define this hack _inside_ expand_group to avoid
# graph_break [('torch.* op returned non-Tensor int
# caused by 'cast_*` functions being treated as 'torch.*' ops (iiuc)
if TYPE_CHECKING:
def cast_listlistint(x):
return cast(list[list[int]], x)
def cast_listint(x):
return cast(list[int], x)
else:
# fake cast op for use at runtime since dynamo doesn't support real cast
# also, dynamo didn't like encountering 'typing' objects ()
# NotImplementedError: argument of type: <class 'typing._GenericAlias'>
def cast_listlistint(x):
return x
def cast_listint(x):
return x
rankset: list[int]
if isinstance(group, list):
if isinstance(group[0], list):
nested_list = cast_listlistint(group)
rankset = []
group_size = -1
for rs in nested_list:
rankset.extend(rs)
if group_size != -1 and group_size != len(rs):
raise ValueError(
f"group sizes must be identical found {group_size} and {len(rs)}"
)
group_size = len(rs)
else:
rankset = cast_listint(group)
group_size = len(rankset)
elif isinstance(group, dist.ProcessGroup):
rankset = dist.get_process_group_ranks(group)
group_size = len(rankset)
tag = tag or c10d._get_group_tag(group)
elif isinstance(group, DeviceMesh):
if group.ndim != 1:
raise AssertionError(
"Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
)
# TODO: it should run collective in the whole mesh instead of dim 0
pg = group.get_group()
rankset = dist.get_process_group_ranks(pg)
group_size = len(rankset)
tag = tag or c10d._get_group_tag(pg)
elif isinstance(group, tuple):
if (
len(group) == 2
and isinstance(group[0], DeviceMesh)
and isinstance(group[1], int)
):
dmesh = group[0]
dim = group[1]
pg = dmesh.get_group(dim)
rankset = dist.get_process_group_ranks(pg)
group_size = len(rankset)
tag = tag or c10d._get_group_tag(pg)
else:
raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
else:
raise ValueError(
"Invalid type for group, must be one of List, Processgroup, DeviceMesh or (DeviceMesh, int)."
)
return (tag, rankset, group_size)
def _resolve_group_name(group: RANK_TYPES, tag: str = "") -> str:
"""
Given group in RANK_TYPES, return the group name.
"""
# `tag` will be deprecated. See details in:
# https://github.com/pytorch/pytorch/issues/93173#issuecomment-1907095208
if isinstance(group, dist.ProcessGroup):
return group.group_name
elif isinstance(group, str):
return group
elif isinstance(group, DeviceMesh):
if group.ndim != 1:
raise AssertionError(
"Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D"
)
return group._dim_group_names[0]
elif isinstance(group, tuple):
if (
len(group) == 2
and isinstance(group[0], DeviceMesh)
and isinstance(group[1], int)
):
dmesh = group[0]
dim = group[1]
return dmesh._dim_group_names[dim]
else:
raise ValueError("Invalid tuple for group must be (DeviceMesh, int)")
elif isinstance(group, list):
if not is_torchdynamo_compiling():
warnings.warn(
"The combination of ranks + tag as process group "
"identifier has been deprecated. Please switch to "
"using ProcessGroup, DeviceMesh, or group name instead.",
FutureWarning,
stacklevel=3,
)
# pyrefly: ignore [redundant-cast]
return c10d._resolve_group_name_by_ranks_and_tag(cast(list[int], group), tag)
else:
raise ValueError(f"Unsupported group type: {type(group)}, {group}")
| AsyncCollectiveTensor |
python | doocs__leetcode | lcof2/剑指 Offer II 038. 每日温度/Solution2.py | {
"start": 0,
"end": 391
} | class ____:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
n = len(temperatures)
stk = []
ans = [0] * n
for i in range(n - 1, -1, -1):
while stk and temperatures[stk[-1]] <= temperatures[i]:
stk.pop()
if stk:
ans[i] = stk[-1] - i
stk.append(i)
return ans
| Solution |
python | scrapy__scrapy | scrapy/spidermiddlewares/depth.py | {
"start": 660,
"end": 3153
} | class ____(BaseSpiderMiddleware):
crawler: Crawler
def __init__( # pylint: disable=super-init-not-called
self,
maxdepth: int,
stats: StatsCollector,
verbose_stats: bool = False,
prio: int = 1,
):
self.maxdepth = maxdepth
self.stats = stats
self.verbose_stats = verbose_stats
self.prio = prio
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
settings = crawler.settings
maxdepth = settings.getint("DEPTH_LIMIT")
verbose = settings.getbool("DEPTH_STATS_VERBOSE")
prio = settings.getint("DEPTH_PRIORITY")
assert crawler.stats
o = cls(maxdepth, crawler.stats, verbose, prio)
o.crawler = crawler
return o
@_warn_spider_arg
def process_spider_output(
self, response: Response, result: Iterable[Any], spider: Spider | None = None
) -> Iterable[Any]:
self._init_depth(response)
yield from super().process_spider_output(response, result)
@_warn_spider_arg
async def process_spider_output_async(
self,
response: Response,
result: AsyncIterator[Any],
spider: Spider | None = None,
) -> AsyncIterator[Any]:
self._init_depth(response)
async for o in super().process_spider_output_async(response, result):
yield o
def _init_depth(self, response: Response) -> None:
# base case (depth=0)
if "depth" not in response.meta:
response.meta["depth"] = 0
if self.verbose_stats:
self.stats.inc_value("request_depth_count/0")
def get_processed_request(
self, request: Request, response: Response | None
) -> Request | None:
if response is None:
# start requests
return request
depth = response.meta["depth"] + 1
request.meta["depth"] = depth
if self.prio:
request.priority -= depth * self.prio
if self.maxdepth and depth > self.maxdepth:
logger.debug(
"Ignoring link (depth > %(maxdepth)d): %(requrl)s ",
{"maxdepth": self.maxdepth, "requrl": request.url},
extra={"spider": self.crawler.spider},
)
return None
if self.verbose_stats:
self.stats.inc_value(f"request_depth_count/{depth}")
self.stats.max_value("request_depth_max", depth)
return request
| DepthMiddleware |
python | matplotlib__matplotlib | tools/gh_api.py | {
"start": 402,
"end": 9526
} | class ____(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError as err:
raise AttributeError(name) from err
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
try:
with open(os.path.join(os.path.expanduser('~'), '.ghoauth')) as f:
token, = f
return token
except Exception:
pass
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on GitHub.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token().replace("\n","")}
def post_issue_comment(project, num, body):
url = f'https://api.github.com/repos/{project}/issues/{num}/comments'
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""Return the pull request info for a given PR number."""
url = f"https://api.github.com/repos/{project}/pulls/{num}"
if auth:
header = make_auth_header()
else:
header = None
print("fetching %s" % url, file=sys.stderr)
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = f"https://api.github.com/repos/{project}/pulls/{num}/files"
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print(f"fetching {url}", file=sys.stderr)
else:
print(f"fetching {url} with {params}", file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = f"https://api.github.com/repos/{project}/pulls"
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = f"https://api.github.com/repos/{project}/issues"
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
params.setdefault('state', 'all')
url = f"https://api.github.com/repos/{project}/milestones"
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append(f"{author['name']} <{author['email']}>")
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in [
'key', 'acl', 'Filename', 'success_action_status',
'AWSAccessKeyId', 'Policy', 'Signature', 'Content-Type', 'file']:
yield key, fields.pop(key)
yield from fields.items()
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be str.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, str):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = f"https://api.github.com/repos/{project}/downloads"
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r
| Obj |
python | qdrant__qdrant-client | tests/congruence_tests/test_scroll.py | {
"start": 341,
"end": 2817
} | class ____:
@classmethod
def scroll_all(cls, client: QdrantBase) -> list[models.Record]:
all_records = []
records, next_page = client.scroll(
collection_name=COLLECTION_NAME,
limit=10,
with_payload=True,
)
all_records.extend(records)
while next_page:
records, next_page = client.scroll(
collection_name=COLLECTION_NAME,
limit=20,
offset=next_page,
with_payload=True,
)
all_records.extend(records)
return all_records
def test_simple_search() -> None:
fixture_points = generate_fixtures(200)
scroller = TestSimpleScroller()
local_client = init_local()
init_client(local_client, fixture_points)
remote_client = init_remote()
init_client(remote_client, fixture_points)
compare_client_results(local_client, remote_client, scroller.scroll_all)
def test_simple_sparse_scroll() -> None:
fixture_points = generate_sparse_fixtures(200)
local_client = init_local()
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
scroller = TestSimpleScroller()
compare_client_results(local_client, remote_client, scroller.scroll_all)
def test_mixed_ids() -> None:
fixture_points = generate_fixtures(100, random_ids=True) + generate_fixtures(
100, random_ids=False
)
random.shuffle(fixture_points)
scroller = TestSimpleScroller()
local_client = init_local()
init_client(local_client, fixture_points)
remote_client = init_remote()
init_client(remote_client, fixture_points)
compare_client_results(local_client, remote_client, scroller.scroll_all)
def test_sparse_mixed_ids() -> None:
fixture_points = generate_sparse_fixtures(100, random_ids=True) + generate_sparse_fixtures(
100, random_ids=False
)
random.shuffle(fixture_points)
scroller = TestSimpleScroller()
local_client = init_local()
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
compare_client_results(local_client, remote_client, scroller.scroll_all)
| TestSimpleScroller |
python | walkccc__LeetCode | solutions/1966. Binary Searchable Numbers in an Unsorted Array/1966.py | {
"start": 0,
"end": 600
} | class ____:
def binarySearchableNumbers(self, nums: list[int]) -> int:
n = len(nums)
# prefixMaxs[i] := max(nums[0..i))
prefixMaxs = [0] * n
# suffixMins[i] := min(nums[i + 1..n))
suffixMins = [0] * n
# Fill in `prefixMaxs`.
prefixMaxs[0] = -math.inf
for i in range(1, n):
prefixMaxs[i] = max(prefixMaxs[i - 1], nums[i - 1])
# Fill in `suffixMins`.
suffixMins[n - 1] = math.inf
for i in range(n - 2, -1, -1):
suffixMins[i] = min(suffixMins[i + 1], nums[i + 1])
return sum(prefixMaxs[i] < nums[i] < suffixMins[i] for i in range(n))
| Solution |
python | python__mypy | mypy/dmypy/client.py | {
"start": 8774,
"end": 25061
} | class ____(Exception):
"""Exception raised when there is something wrong with the status file.
For example:
- No status file found
- Status file malformed
- Process whose pid is in the status file does not exist
"""
def main(argv: list[str]) -> None:
"""The code is top-down."""
check_python_version("dmypy")
# set recursion limit consistent with mypy/main.py
sys.setrecursionlimit(RECURSION_LIMIT)
args = parser.parse_args(argv)
if not args.action:
parser.print_usage()
else:
try:
args.action(args)
except BadStatus as err:
fail(err.args[0])
except Exception:
# We do this explicitly to avoid exceptions percolating up
# through mypy.api invocations
traceback.print_exc()
sys.exit(2)
def fail(msg: str) -> NoReturn:
print(msg, file=sys.stderr)
sys.exit(2)
ActionFunction = Callable[[argparse.Namespace], None]
def action(subparser: argparse.ArgumentParser) -> Callable[[ActionFunction], ActionFunction]:
"""Decorator to tie an action function to a subparser."""
def register(func: ActionFunction) -> ActionFunction:
subparser.set_defaults(action=func)
return func
return register
# Action functions (run in client from command line).
@action(start_parser)
def do_start(args: argparse.Namespace) -> None:
"""Start daemon (it must not already be running).
This is where mypy flags are set from the command line.
Setting flags is a bit awkward; you have to use e.g.:
dmypy start -- --strict
since we don't want to duplicate mypy's huge list of flags.
"""
try:
get_status(args.status_file)
except BadStatus:
# Bad or missing status file or dead process; good to start.
pass
else:
fail("Daemon is still alive")
start_server(args)
@action(restart_parser)
def do_restart(args: argparse.Namespace) -> None:
"""Restart daemon (it may or may not be running; but not hanging).
We first try to stop it politely if it's running. This also sets
mypy flags from the command line (see do_start()).
"""
restart_server(args)
def restart_server(args: argparse.Namespace, allow_sources: bool = False) -> None:
"""Restart daemon (it may or may not be running; but not hanging)."""
try:
do_stop(args)
except BadStatus:
# Bad or missing status file or dead process; good to start.
pass
start_server(args, allow_sources)
def start_server(args: argparse.Namespace, allow_sources: bool = False) -> None:
"""Start the server from command arguments and wait for it."""
# Lazy import so this import doesn't slow down other commands.
from mypy.dmypy_server import daemonize, process_start_options
start_options = process_start_options(args.flags, allow_sources)
if daemonize(start_options, args.status_file, timeout=args.timeout, log_file=args.log_file):
sys.exit(2)
wait_for_server(args.status_file)
def wait_for_server(status_file: str, timeout: float = 5.0) -> None:
"""Wait until the server is up.
Exit if it doesn't happen within the timeout.
"""
endtime = time.time() + timeout
while time.time() < endtime:
try:
data = read_status(status_file)
except BadStatus:
# If the file isn't there yet, retry later.
time.sleep(0.1)
continue
# If the file's content is bogus or the process is dead, fail.
check_status(data)
print("Daemon started")
return
fail("Timed out waiting for daemon to start")
@action(run_parser)
def do_run(args: argparse.Namespace) -> None:
"""Do a check, starting (or restarting) the daemon as necessary
Restarts the daemon if the running daemon reports that it is
required (due to a configuration change, for example).
Setting flags is a bit awkward; you have to use e.g.:
dmypy run -- --strict a.py b.py ...
since we don't want to duplicate mypy's huge list of flags.
(The -- is only necessary if flags are specified.)
"""
if not is_running(args.status_file):
# Bad or missing status file or dead process; good to start.
start_server(args, allow_sources=True)
t0 = time.time()
response = request(
args.status_file,
"run",
version=__version__,
args=args.flags,
export_types=args.export_types,
)
# If the daemon signals that a restart is necessary, do it
if "restart" in response:
print(f"Restarting: {response['restart']}")
restart_server(args, allow_sources=True)
response = request(
args.status_file,
"run",
version=__version__,
args=args.flags,
export_types=args.export_types,
)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(status_parser)
def do_status(args: argparse.Namespace) -> None:
"""Print daemon status.
This verifies that it is responsive to requests.
"""
status = read_status(args.status_file)
if args.verbose:
show_stats(status)
# Both check_status() and request() may raise BadStatus,
# which will be handled by main().
check_status(status)
response = request(
args.status_file, "status", fswatcher_dump_file=args.fswatcher_dump_file, timeout=5
)
if args.verbose or "error" in response:
show_stats(response)
if "error" in response:
fail(f"Daemon may be busy processing; if this persists, consider {sys.argv[0]} kill")
print("Daemon is up and running")
@action(stop_parser)
def do_stop(args: argparse.Namespace) -> None:
"""Stop daemon via a 'stop' request."""
# May raise BadStatus, which will be handled by main().
response = request(args.status_file, "stop", timeout=5)
if "error" in response:
show_stats(response)
fail(f"Daemon may be busy processing; if this persists, consider {sys.argv[0]} kill")
else:
print("Daemon stopped")
@action(kill_parser)
def do_kill(args: argparse.Namespace) -> None:
"""Kill daemon process with SIGKILL."""
pid, _ = get_status(args.status_file)
try:
kill(pid)
except OSError as err:
fail(str(err))
else:
print("Daemon killed")
@action(check_parser)
def do_check(args: argparse.Namespace) -> None:
"""Ask the daemon to check a list of files."""
t0 = time.time()
response = request(args.status_file, "check", files=args.files, export_types=args.export_types)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(recheck_parser)
def do_recheck(args: argparse.Namespace) -> None:
"""Ask the daemon to recheck the previous list of files, with optional modifications.
If at least one of --remove or --update is given, the server will
update the list of files to check accordingly and assume that any other files
are unchanged. If none of these flags are given, the server will call stat()
on each file last checked to determine its status.
Files given in --update ought to exist. Files given in --remove need not exist;
if they don't they will be ignored.
The lists may be empty but oughtn't contain duplicates or overlap.
NOTE: The list of files is lost when the daemon is restarted.
"""
t0 = time.time()
if args.remove is not None or args.update is not None:
response = request(
args.status_file,
"recheck",
export_types=args.export_types,
remove=args.remove,
update=args.update,
)
else:
response = request(args.status_file, "recheck", export_types=args.export_types)
t1 = time.time()
response["roundtrip_time"] = t1 - t0
check_output(response, args.verbose, args.junit_xml, args.perf_stats_file)
@action(suggest_parser)
def do_suggest(args: argparse.Namespace) -> None:
"""Ask the daemon for a suggested signature.
This just prints whatever the daemon reports as output.
For now it may be closer to a list of call sites.
"""
response = request(
args.status_file,
"suggest",
function=args.function,
json=args.json,
callsites=args.callsites,
no_errors=args.no_errors,
no_any=args.no_any,
flex_any=args.flex_any,
use_fixme=args.use_fixme,
max_guesses=args.max_guesses,
)
check_output(response, verbose=False, junit_xml=None, perf_stats_file=None)
@action(inspect_parser)
def do_inspect(args: argparse.Namespace) -> None:
"""Ask daemon to print the type of an expression."""
response = request(
args.status_file,
"inspect",
show=args.show,
location=args.location,
verbosity=args.verbose,
limit=args.limit,
include_span=args.include_span,
include_kind=args.include_kind,
include_object_attrs=args.include_object_attrs,
union_attrs=args.union_attrs,
force_reload=args.force_reload,
)
check_output(response, verbose=False, junit_xml=None, perf_stats_file=None)
def check_output(
response: dict[str, Any], verbose: bool, junit_xml: str | None, perf_stats_file: str | None
) -> None:
"""Print the output from a check or recheck command.
Call sys.exit() unless the status code is zero.
"""
if os.name == "nt":
# Enable ANSI color codes for Windows cmd using this strange workaround
# ( see https://github.com/python/cpython/issues/74261 )
os.system("")
if "error" in response:
fail(response["error"])
try:
out, err, status_code = response["out"], response["err"], response["status"]
except KeyError:
fail(f"Response: {str(response)}")
sys.stdout.write(out)
sys.stdout.flush()
sys.stderr.write(err)
sys.stderr.flush()
if verbose:
show_stats(response)
if junit_xml:
# Lazy import so this import doesn't slow things down when not writing junit
from mypy.util import write_junit_xml
messages = (out + err).splitlines()
write_junit_xml(
response["roundtrip_time"],
bool(err),
{None: messages} if messages else {},
junit_xml,
response["python_version"],
response["platform"],
)
if perf_stats_file:
telemetry = response.get("stats", {})
with open(perf_stats_file, "w") as f:
json.dump(telemetry, f)
if status_code:
sys.exit(status_code)
def show_stats(response: Mapping[str, object]) -> None:
for key, value in sorted(response.items()):
if key in ("out", "err", "stdout", "stderr"):
# Special case text output to display just 40 characters of text
value = repr(value)[1:-1]
if len(value) > 50:
value = f"{value[:40]} ... {len(value)-40} more characters"
print("%-24s: %s" % (key, value))
continue
print("%-24s: %10s" % (key, "%.3f" % value if isinstance(value, float) else value))
@action(hang_parser)
def do_hang(args: argparse.Namespace) -> None:
"""Hang for 100 seconds, as a debug hack."""
print(request(args.status_file, "hang", timeout=1))
@action(daemon_parser)
def do_daemon(args: argparse.Namespace) -> None:
"""Serve requests in the foreground."""
# Lazy import so this import doesn't slow down other commands.
from mypy.dmypy_server import Server, process_start_options
if args.log_file:
sys.stdout = sys.stderr = open(args.log_file, "a", buffering=1)
fd = sys.stdout.fileno()
os.dup2(fd, 2)
os.dup2(fd, 1)
if args.options_data:
from mypy.options import Options
options_dict = pickle.loads(base64.b64decode(args.options_data))
options_obj = Options()
options = options_obj.apply_changes(options_dict)
else:
options = process_start_options(args.flags, allow_sources=False)
Server(options, args.status_file, timeout=args.timeout).serve()
@action(help_parser)
def do_help(args: argparse.Namespace) -> None:
"""Print full help (same as dmypy --help)."""
parser.print_help()
# Client-side infrastructure.
def request(
status_file: str, command: str, *, timeout: int | None = None, **kwds: object
) -> dict[str, Any]:
"""Send a request to the daemon.
Return the JSON dict with the response.
Raise BadStatus if there is something wrong with the status file
or if the process whose pid is in the status file has died.
Return {'error': <message>} if an IPC operation or receive()
raised OSError. This covers cases such as connection refused or
closed prematurely as well as invalid JSON received.
"""
response: dict[str, str] = {}
args = dict(kwds)
args["command"] = command
# Tell the server whether this request was initiated from a human-facing terminal,
# so that it can format the type checking output accordingly.
args["is_tty"] = sys.stdout.isatty() or should_force_color()
args["terminal_width"] = get_terminal_width()
_, name = get_status(status_file)
try:
with IPCClient(name, timeout) as client:
send(client, args)
final = False
while not final:
response = receive(client)
final = bool(response.pop("final", False))
# Display debugging output written to stdout/stderr in the server process for convenience.
# This should not be confused with "out" and "err" fields in the response.
# Those fields hold the output of the "check" command, and are handled in check_output().
stdout = response.pop("stdout", None)
if stdout:
sys.stdout.write(stdout)
stderr = response.pop("stderr", None)
if stderr:
sys.stderr.write(stderr)
except (OSError, IPCException) as err:
return {"error": str(err)}
# TODO: Other errors, e.g. ValueError, UnicodeError
return response
def get_status(status_file: str) -> tuple[int, str]:
"""Read status file and check if the process is alive.
Return (pid, connection_name) on success.
Raise BadStatus if something's wrong.
"""
data = read_status(status_file)
return check_status(data)
def check_status(data: dict[str, Any]) -> tuple[int, str]:
"""Check if the process is alive.
Return (pid, connection_name) on success.
Raise BadStatus if something's wrong.
"""
if "pid" not in data:
raise BadStatus("Invalid status file (no pid field)")
pid = data["pid"]
if not isinstance(pid, int):
raise BadStatus("pid field is not an int")
if not alive(pid):
raise BadStatus("Daemon has died")
if "connection_name" not in data:
raise BadStatus("Invalid status file (no connection_name field)")
connection_name = data["connection_name"]
if not isinstance(connection_name, str):
raise BadStatus("connection_name field is not a string")
return pid, connection_name
def read_status(status_file: str) -> dict[str, object]:
"""Read status file.
Raise BadStatus if the status file doesn't exist or contains
invalid JSON or the JSON is not a dict.
"""
if not os.path.isfile(status_file):
raise BadStatus("No status file found")
with open(status_file) as f:
try:
data = json.load(f)
except Exception as e:
raise BadStatus("Malformed status file (not JSON)") from e
if not isinstance(data, dict):
raise BadStatus("Invalid status file (not a dict)")
return data
def is_running(status_file: str) -> bool:
"""Check if the server is running cleanly"""
try:
get_status(status_file)
except BadStatus:
return False
return True
# Run main().
def console_entry() -> None:
main(sys.argv[1:])
| BadStatus |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bytes.py | {
"start": 299,
"end": 485
} | class ____:
def __bytes__(self):
print("ruff") # [invalid-bytes-return]
# TODO: Once Ruff has better type checking
def return_bytes():
return "some string"
| BytesNoReturn |
python | sympy__sympy | sympy/stats/matrix_distributions.py | {
"start": 5088,
"end": 7087
} | class ____:
"""Returns the sample from pymc of the given distribution"""
def __new__(cls, dist, size, seed=None):
return cls._sample_pymc(dist, size, seed)
@classmethod
def _sample_pymc(cls, dist, size, seed):
"""Sample from PyMC."""
try:
import pymc
except ImportError:
import pymc3 as pymc
pymc_rv_map = {
'MatrixNormalDistribution': lambda dist: pymc.MatrixNormal('X',
mu=matrix2numpy(dist.location_matrix, float),
rowcov=matrix2numpy(dist.scale_matrix_1, float),
colcov=matrix2numpy(dist.scale_matrix_2, float),
shape=dist.location_matrix.shape),
'WishartDistribution': lambda dist: pymc.WishartBartlett('X',
nu=int(dist.n), S=matrix2numpy(dist.scale_matrix, float))
}
sample_shape = {
'WishartDistribution': lambda dist: dist.scale_matrix.shape,
'MatrixNormalDistribution' : lambda dist: dist.location_matrix.shape
}
dist_list = pymc_rv_map.keys()
if dist.__class__.__name__ not in dist_list:
return None
import logging
logging.getLogger("pymc").setLevel(logging.ERROR)
with pymc.Model():
pymc_rv_map[dist.__class__.__name__](dist)
samps = pymc.sample(draws=prod(size), chains=1, progressbar=False, random_seed=seed, return_inferencedata=False, compute_convergence_checks=False)['X']
return samps.reshape(size + sample_shape[dist.__class__.__name__](dist))
_get_sample_class_matrixrv = {
'scipy': SampleMatrixScipy,
'pymc3': SampleMatrixPymc,
'pymc': SampleMatrixPymc,
'numpy': SampleMatrixNumpy
}
################################################################################
#-------------------------Matrix Distribution----------------------------------#
################################################################################
| SampleMatrixPymc |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 2829,
"end": 4271
} | class ____(Regression):
"""Linear model.
Parameters:
-----------
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
gradient_descent: boolean
True or false depending if gradient descent should be used when training. If
false then we use batch optimization by least squares.
"""
def __init__(self, n_iterations=100, learning_rate=0.001, gradient_descent=True):
self.gradient_descent = gradient_descent
# No regularization
self.regularization = lambda x: 0
self.regularization.grad = lambda x: 0
super(LinearRegression, self).__init__(n_iterations=n_iterations,
learning_rate=learning_rate)
def fit(self, X, y):
# If not gradient descent => Least squares approximation of w
if not self.gradient_descent:
# Insert constant ones for bias weights
X = np.insert(X, 0, 1, axis=1)
# Calculate weights by least squares (using Moore-Penrose pseudoinverse)
U, S, V = np.linalg.svd(X.T.dot(X))
S = np.diag(S)
X_sq_reg_inv = V.dot(np.linalg.pinv(S)).dot(U.T)
self.w = X_sq_reg_inv.dot(X.T).dot(y)
else:
super(LinearRegression, self).fit(X, y)
| LinearRegression |
python | doocs__leetcode | solution/1200-1299/1249.Minimum Remove to Make Valid Parentheses/Solution.py | {
"start": 0,
"end": 581
} | class ____:
def minRemoveToMakeValid(self, s: str) -> str:
stk = []
x = 0
for c in s:
if c == ')' and x == 0:
continue
if c == '(':
x += 1
elif c == ')':
x -= 1
stk.append(c)
x = 0
ans = []
for c in stk[::-1]:
if c == '(' and x == 0:
continue
if c == ')':
x += 1
elif c == '(':
x -= 1
ans.append(c)
return ''.join(ans[::-1])
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/glue.py | {
"start": 7181,
"end": 8652
} | class ____(AwsBaseWaiterTrigger):
"""
Trigger when a AWS Glue data quality evaluation run complete.
:param evaluation_run_id: The AWS Glue data quality ruleset evaluation run identifier.
:param waiter_delay: The amount of time in seconds to wait between attempts. (default: 60)
:param waiter_max_attempts: The maximum number of attempts to be made. (default: 75)
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
evaluation_run_id: str,
waiter_delay: int = 60,
waiter_max_attempts: int = 75,
aws_conn_id: str | None = "aws_default",
):
super().__init__(
serialized_fields={"evaluation_run_id": evaluation_run_id},
waiter_name="data_quality_ruleset_evaluation_run_complete",
waiter_args={"RunId": evaluation_run_id},
failure_message="AWS Glue data quality ruleset evaluation run failed.",
status_message="Status of AWS Glue data quality ruleset evaluation run is",
status_queries=["Status"],
return_key="evaluation_run_id",
return_value=evaluation_run_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return GlueDataQualityHook(aws_conn_id=self.aws_conn_id)
| GlueDataQualityRuleSetEvaluationRunCompleteTrigger |
python | django__django | tests/utils_tests/test_decorators.py | {
"start": 734,
"end": 1479
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
def process_request(self, request):
request.process_request_reached = True
def process_view(self, request, view_func, view_args, view_kwargs):
request.process_view_reached = True
def process_template_response(self, request, response):
request.process_template_response_reached = True
return response
def process_response(self, request, response):
# This should never receive unrendered content.
request.process_response_content = response.content
request.process_response_reached = True
return response
full_dec = decorator_from_middleware(FullMiddleware)
| FullMiddleware |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 1104,
"end": 1457
} | class ____(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(mcs, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(mcs, name, bases, d)
| LexerMeta |
python | mlflow__mlflow | mlflow/models/model.py | {
"start": 61006,
"end": 68196
} | class ____(NamedTuple):
requirements: Path
conda: Path
def get_model_requirements_files(resolved_uri: str) -> Files:
requirements_txt_file = _download_artifact_from_uri(
artifact_uri=append_to_uri_path(resolved_uri, _REQUIREMENTS_FILE_NAME)
)
conda_yaml_file = _download_artifact_from_uri(
artifact_uri=append_to_uri_path(resolved_uri, _CONDA_ENV_FILE_NAME)
)
return Files(
Path(requirements_txt_file),
Path(conda_yaml_file),
)
def update_model_requirements(
model_uri: str,
operation: Literal["add", "remove"],
requirement_list: list[str],
) -> None:
"""
Add or remove requirements from a model's conda.yaml and requirements.txt files.
The process involves downloading these two files from the model artifacts
(if they're non-local), updating them with the specified requirements,
and then overwriting the existing files. Should the artifact repository
associated with the model artifacts disallow overwriting, this function will
fail.
Note that model registry URIs (i.e. URIs in the form ``models:/``) are not
supported, as artifacts in the model registry are intended to be read-only.
If adding requirements, the function will overwrite any existing requirements
that overlap, or else append the new requirements to the existing list.
If removing requirements, the function will ignore any version specifiers,
and remove all the specified package names. Any requirements that are not
found in the existing files will be ignored.
Args:
model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
operation: The operation to perform. Must be one of "add" or "remove".
requirement_list: A list of requirements to add or remove from the model.
For example: ["numpy==1.20.3", "pandas>=1.3.3"]
"""
resolved_uri = model_uri
if ModelsArtifactRepository.is_models_uri(model_uri):
if not ModelsArtifactRepository._is_logged_model_uri(model_uri):
raise MlflowException(
f'Failed to set requirements on "{model_uri}". '
+ "Model URIs with the `models:/` scheme are not supported.",
INVALID_PARAMETER_VALUE,
)
resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
elif RunsArtifactRepository.is_runs_uri(model_uri):
resolved_uri = RunsArtifactRepository.get_underlying_uri(model_uri)
_logger.info(f"Retrieving model requirements files from {resolved_uri}...")
local_paths = get_model_requirements_files(resolved_uri)
conda_yaml_path = local_paths.conda
requirements_txt_path = local_paths.requirements
old_conda_reqs = _get_requirements_from_file(conda_yaml_path)
old_requirements_reqs = _get_requirements_from_file(requirements_txt_path)
requirements = []
invalid_requirements = {}
for s in requirement_list:
try:
requirements.append(Requirement(s.strip().lower()))
except InvalidRequirement as e:
invalid_requirements[s] = e
if invalid_requirements:
raise MlflowException.invalid_parameter_value(
f"Found invalid requirements: {invalid_requirements}"
)
if operation == "add":
updated_conda_reqs = _add_or_overwrite_requirements(requirements, old_conda_reqs)
updated_requirements_reqs = _add_or_overwrite_requirements(
requirements, old_requirements_reqs
)
else:
updated_conda_reqs = _remove_requirements(requirements, old_conda_reqs)
updated_requirements_reqs = _remove_requirements(requirements, old_requirements_reqs)
_write_requirements_to_file(conda_yaml_path, updated_conda_reqs)
_write_requirements_to_file(requirements_txt_path, updated_requirements_reqs)
# just print conda reqs here to avoid log spam
# it should be the same as requirements.txt anyway
_logger.info(
"Done updating requirements!\n\n"
f"Old requirements:\n{pformat([str(req) for req in old_conda_reqs])}\n\n"
f"Updated requirements:\n{pformat(updated_conda_reqs)}\n"
)
_logger.info(f"Uploading updated requirements files to {resolved_uri}...")
_upload_artifact_to_uri(conda_yaml_path, resolved_uri)
_upload_artifact_to_uri(requirements_txt_path, resolved_uri)
__mlflow_model__ = None
def _validate_langchain_model(model):
from langchain_core.runnables.base import Runnable
from mlflow.models.utils import _validate_and_get_model_code_path
if isinstance(model, str):
return _validate_and_get_model_code_path(model, None)
if not isinstance(model, Runnable):
raise MlflowException.invalid_parameter_value(
"Model must be a Langchain Runnable type or path to a Langchain model, "
f"got {type(model)}"
)
return model
def _validate_llama_index_model(model):
from mlflow.llama_index.model import _validate_and_prepare_llama_index_model_or_path
return _validate_and_prepare_llama_index_model_or_path(model, None)
def set_model(model) -> None:
"""
When logging model as code, this function can be used to set the model object
to be logged.
Args:
model: The model object to be logged. Supported model types are:
- A Python function or callable object.
- A Langchain model or path to a Langchain model.
- A Llama Index model or path to a Llama Index model.
"""
from mlflow.pyfunc import PythonModel
if isinstance(model, str):
raise mlflow.MlflowException(SET_MODEL_ERROR)
if isinstance(model, PythonModel) or callable(model):
globals()["__mlflow_model__"] = model
return
for validate_function in [_validate_langchain_model, _validate_llama_index_model]:
try:
globals()["__mlflow_model__"] = validate_function(model)
return
except Exception:
pass
raise mlflow.MlflowException(SET_MODEL_ERROR)
def _update_active_model_id_based_on_mlflow_model(mlflow_model: Model):
"""
Update the current active model ID based on the provided MLflow model.
Only set the active model ID if it is not already set by the user.
This is useful for setting the active model ID when loading a model
to ensure traces generated are associated with the loaded model.
"""
if mlflow_model.model_id is None:
return
amc = _get_active_model_context()
# only set the active model if the model is not set by the user
if amc.model_id != mlflow_model.model_id and not amc.set_by_user:
_set_active_model_id(model_id=mlflow_model.model_id)
| Files |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 2780,
"end": 3414
} | class ____(models.Model):
value_limit_field = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
length_limit_field = models.CharField(validators=[MinLengthValidator(3)], max_length=12)
blank_field = models.CharField(blank=True, max_length=10)
null_field = models.IntegerField(null=True)
default_field = models.IntegerField(default=0)
descriptive_field = models.IntegerField(help_text='Some help text', verbose_name='A label')
choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
text_choices_field = models.TextField(choices=COLOR_CHOICES)
| FieldOptionsModel |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 9054,
"end": 9224
} | class ____(
spack.phase_callbacks.PhaseCallbacksMeta,
spack.multimethod.MultiMethodMeta,
type(collections.abc.Sequence), # type: ignore
):
pass
| BuilderMeta |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 2435,
"end": 3446
} | class ____(ActionBaseModel):
active: bool = Field(
default=True, description="Whether or not the schedule is active."
)
schedule: schemas.schedules.SCHEDULE_TYPES = Field(
default=..., description="The schedule for the deployment."
)
max_scheduled_runs: Optional[PositiveInteger] = Field(
default=None,
description="The maximum number of scheduled runs for the schedule.",
)
parameters: dict[str, Any] = Field(
default_factory=dict, description="A dictionary of parameter value overrides."
)
slug: Optional[str] = Field(
default=None,
description="A unique identifier for the schedule.",
)
@field_validator("max_scheduled_runs")
@classmethod
def validate_max_scheduled_runs(
cls, v: PositiveInteger | None
) -> PositiveInteger | None:
return validate_schedule_max_scheduled_runs(
v, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value()
)
| DeploymentScheduleCreate |
python | pyca__cryptography | tests/test_fernet.py | {
"start": 1042,
"end": 6071
} | class ____:
@json_parametrize(
("secret", "now", "iv", "src", "token"),
"generate.json",
)
def test_generate(self, secret, now, iv, src, token, backend):
f = Fernet(secret.encode("ascii"), backend=backend)
actual_token = f._encrypt_from_parts(
src.encode("ascii"),
int(datetime.datetime.fromisoformat(now).timestamp()),
bytes(iv),
)
assert actual_token == token.encode("ascii")
@json_parametrize(
("secret", "now", "src", "ttl_sec", "token"),
"verify.json",
)
def test_verify(
self, secret, now, src, ttl_sec, token, backend, monkeypatch
):
# secret & token are both str
f = Fernet(secret.encode("ascii"), backend=backend)
current_time = int(datetime.datetime.fromisoformat(now).timestamp())
payload = f.decrypt_at_time(
token, # str
ttl=ttl_sec,
current_time=current_time,
)
assert payload == src.encode("ascii")
payload = f.decrypt_at_time(
token.encode("ascii"), # bytes
ttl=ttl_sec,
current_time=current_time,
)
assert payload == src.encode("ascii")
monkeypatch.setattr(time, "time", lambda: current_time)
payload = f.decrypt(token, ttl=ttl_sec) # str
assert payload == src.encode("ascii")
payload = f.decrypt(token.encode("ascii"), ttl=ttl_sec) # bytes
assert payload == src.encode("ascii")
@json_parametrize(("secret", "token", "now", "ttl_sec"), "invalid.json")
def test_invalid(self, secret, token, now, ttl_sec, backend, monkeypatch):
f = Fernet(secret.encode("ascii"), backend=backend)
current_time = int(datetime.datetime.fromisoformat(now).timestamp())
with pytest.raises(InvalidToken):
f.decrypt_at_time(
token.encode("ascii"),
ttl=ttl_sec,
current_time=current_time,
)
monkeypatch.setattr(time, "time", lambda: current_time)
with pytest.raises(InvalidToken):
f.decrypt(token.encode("ascii"), ttl=ttl_sec)
def test_invalid_start_byte(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
with pytest.raises(InvalidToken):
f.decrypt(base64.urlsafe_b64encode(b"\x81"))
def test_timestamp_too_short(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
with pytest.raises(InvalidToken):
f.decrypt(base64.urlsafe_b64encode(b"\x80abc"))
def test_non_base64_token(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
with pytest.raises(InvalidToken):
f.decrypt(b"\x00")
with pytest.raises(InvalidToken):
f.decrypt("nonsensetoken")
def test_invalid_types(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
with pytest.raises(TypeError):
f.encrypt("") # type: ignore[arg-type]
with pytest.raises(TypeError):
f.decrypt(12345) # type: ignore[arg-type]
def test_timestamp_ignored_no_ttl(self, monkeypatch, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
pt = b"encrypt me"
token = f.encrypt(pt)
monkeypatch.setattr(time, "time", pretend.raiser(ValueError))
assert f.decrypt(token, ttl=None) == pt
def test_ttl_required_in_decrypt_at_time(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
pt = b"encrypt me"
token = f.encrypt(pt)
with pytest.raises(ValueError):
f.decrypt_at_time(
token,
ttl=None, # type: ignore[arg-type]
current_time=int(time.time()),
)
@pytest.mark.parametrize("message", [b"", b"Abc!", b"\x00\xff\x00\x80"])
def test_roundtrips(self, message, backend):
f = Fernet(Fernet.generate_key(), backend=backend)
assert f.decrypt(f.encrypt(message)) == message
@pytest.mark.parametrize("key", [base64.urlsafe_b64encode(b"abc"), b"abc"])
def test_bad_key(self, backend, key):
with pytest.raises(ValueError):
Fernet(key, backend=backend)
def test_extract_timestamp(self, backend):
f = Fernet(base64.urlsafe_b64encode(b"\x00" * 32), backend=backend)
current_time = 1526138327
token = f.encrypt_at_time(b"encrypt me", current_time)
assert f.extract_timestamp(token) == current_time
assert f.extract_timestamp(token.decode("ascii")) == current_time
with pytest.raises(InvalidToken):
f.extract_timestamp(b"nonsensetoken")
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES(b"\x00" * 32), modes.CBC(b"\x00" * 16)
),
skip_message="Does not support AES CBC",
)
| TestFernet |
python | spyder-ide__spyder | spyder/api/utils.py | {
"start": 1668,
"end": 1970
} | class ____(property):
"""
Decorator to declare class constants as properties that require additional
computation.
Taken from: https://stackoverflow.com/a/7864317/438386
"""
def __get__(self, cls, owner):
return classmethod(self.fget).__get__(None, owner)()
| classproperty |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 4912,
"end": 4975
} | class ____(SiglipVisionModelOutput):
pass
| Siglip2VisionOutput |
python | django__django | tests/admin_widgets/tests.py | {
"start": 26401,
"end": 32208
} | class ____(TestCase):
def test_render(self):
band = Band.objects.create(name="Linkin Park")
band.album_set.create(
name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg"
)
rel_uuid = Album._meta.get_field("band").remote_field
w = widgets.ForeignKeyRawIdWidget(rel_uuid, widget_admin_site)
self.assertHTMLEqual(
w.render("test", band.uuid, attrs={}),
'<div><input type="text" name="test" value="%(banduuid)s" '
'class="vForeignKeyRawIdAdminField vUUIDField">'
'<a href="/admin_widgets/band/?_to_field=uuid" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
"</strong></div>" % {"banduuid": band.uuid, "bandpk": band.pk},
)
rel_id = ReleaseEvent._meta.get_field("album").remote_field
w = widgets.ForeignKeyRawIdWidget(rel_id, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None, attrs={}),
'<div><input type="text" name="test" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/album/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a></div>',
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name="Apple")
Inventory.objects.create(barcode=22, name="Pear")
core = Inventory.objects.create(barcode=87, name="Core", parent=apple)
rel = Inventory._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", core.parent_id, attrs={}),
'<div><input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
"Apple</a></strong></div>" % {"pk": apple.pk},
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location="Old tree")
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field("honeycomb").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("honeycomb_widget", big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s">'
" <strong>%(hcomb)s</strong>"
% {"hcombpk": big_honeycomb.pk, "hcomb": big_honeycomb},
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name="Subject #1")
Individual.objects.create(name="Child", parent=subject1)
rel = Individual._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("individual_widget", subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s">'
" <strong>%(subj1)s</strong>"
% {"subj1pk": subject1.pk, "subj1": subject1},
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = Inventory._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(barcode=93, name="Hidden", hidden=True)
child_of_hidden = Inventory.objects.create(
barcode=94, name="Child of hidden", parent=hidden
)
self.assertHTMLEqual(
w.render("test", child_of_hidden.parent_id, attrs={}),
'<div><input type="text" name="test" value="93" '
' class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
"Hidden</a></strong></div>" % {"pk": hidden.pk},
)
def test_render_unsafe_limit_choices_to(self):
rel = UnsafeLimitChoicesTo._meta.get_field("band").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None),
'<div><input type="text" name="test" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/band/?name=%22%26%3E%3Cescapeme&'
'_to_field=artist_ptr" class="related-lookup" id="lookup_id_test" '
'title="Lookup"></a></div>',
)
def test_render_fk_as_pk_model(self):
rel = VideoStream._meta.get_field("release_event").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None),
'<div><input type="text" name="test" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/releaseevent/?_to_field=album" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a></div>',
)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
| ForeignKeyRawIdWidgetTest |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/sql/tracking.py | {
"start": 4195,
"end": 10129
} | class ____(DjDTCursorWrapperMixin):
"""
Wraps a cursor and logs queries.
"""
def _decode(self, param):
if PostgresJson and isinstance(param, PostgresJson):
# psycopg3
if hasattr(param, "obj"):
return param.dumps(param.obj)
# psycopg2
if hasattr(param, "adapted"):
return param.dumps(param.adapted)
# If a sequence type, decode each element separately
if isinstance(param, (tuple, list)):
return [self._decode(element) for element in param]
# If a dictionary type, decode each value separately
if isinstance(param, dict):
return {key: self._decode(value) for key, value in param.items()}
# make sure datetime, date and time are converted to string by force_str
CONVERT_TYPES = (datetime.datetime, datetime.date, datetime.time)
return force_str(param, strings_only=not isinstance(param, CONVERT_TYPES))
def _last_executed_query(self, sql, params):
"""Get the last executed query from the connection."""
# Django's psycopg3 backend creates a new cursor in its implementation of the
# .last_executed_query() method. To avoid wrapping that cursor, temporarily set
# the DatabaseWrapper's ._djdt_logger attribute to None. This will cause the
# monkey-patched .cursor() and .chunked_cursor() methods to skip the wrapping
# process during the .last_executed_query() call.
self.db._djdt_logger = None
try:
return self.db.ops.last_executed_query(self.cursor, sql, params)
finally:
self.db._djdt_logger = self.logger
def _record(self, method, sql, params):
alias = self.db.alias
vendor = self.db.vendor
if vendor == "postgresql":
# The underlying DB connection (as opposed to Django's wrapper)
conn = self.db.connection
initial_conn_status = conn.info.transaction_status
start_time = perf_counter()
try:
return method(sql, params)
finally:
stop_time = perf_counter()
duration = (stop_time - start_time) * 1000
_params = ""
with contextlib.suppress(TypeError):
# object JSON serializable?
_params = json.dumps(self._decode(params))
template_info = get_template_info()
# Sql might be an object (such as psycopg Composed).
# For logging purposes, make sure it's str.
if vendor == "postgresql" and not isinstance(sql, str):
if isinstance(sql, bytes):
sql = sql.decode("utf-8")
else:
sql = sql.as_string(conn)
else:
sql = str(sql)
kwargs = {
"vendor": vendor,
"alias": alias,
"sql": self._last_executed_query(sql, params),
"duration": duration,
"raw_sql": sql,
"params": _params,
"stacktrace": get_stack_trace(skip=2),
"template_info": template_info,
}
if vendor == "postgresql":
# If an erroneous query was ran on the connection, it might
# be in a state where checking isolation_level raises an
# exception.
try:
iso_level = conn.isolation_level
except conn.InternalError:
iso_level = "unknown"
# PostgreSQL does not expose any sort of transaction ID, so it is
# necessary to generate synthetic transaction IDs here. If the
# connection was not in a transaction when the query started, and was
# after the query finished, a new transaction definitely started, so get
# a new transaction ID from logger.new_transaction_id(). If the query
# was in a transaction both before and after executing, make the
# assumption that it is the same transaction and get the current
# transaction ID from logger.current_transaction_id(). There is an edge
# case where Django can start a transaction before the first query
# executes, so in that case logger.current_transaction_id() will
# generate a new transaction ID since one does not already exist.
final_conn_status = conn.info.transaction_status
if final_conn_status == STATUS_IN_TRANSACTION:
if initial_conn_status == STATUS_IN_TRANSACTION:
trans_id = self.logger.current_transaction_id(alias)
else:
trans_id = self.logger.new_transaction_id(alias)
else:
trans_id = None
kwargs.update(
{
"trans_id": trans_id,
"trans_status": conn.info.transaction_status,
"iso_level": iso_level,
}
)
# Skip tracking for toolbar models by default.
# This can be overridden by setting SKIP_TOOLBAR_QUERIES = False
if not dt_settings.get_config()["SKIP_TOOLBAR_QUERIES"] or not any(
table in sql for table in DDT_MODELS
):
# We keep `sql` to maintain backwards compatibility
self.logger.record(**kwargs)
def callproc(self, procname, params=None):
return self._record(super().callproc, procname, params)
def execute(self, sql, params=None):
return self._record(super().execute, sql, params)
def executemany(self, sql, param_list):
return self._record(super().executemany, sql, param_list)
| NormalCursorMixin |
python | pytorch__pytorch | torch/_inductor/scheduler.py | {
"start": 51617,
"end": 51866
} | class ____(BaseSchedulerNode):
def __init__(self, scheduler: Scheduler, node: ir.Operation) -> None:
super().__init__(scheduler)
self._init_from_node(node)
self.set_read_writes(node.get_read_writes())
| NopKernelSchedulerNode |
python | django__django | tests/auth_tests/urls_custom_user_admin.py | {
"start": 209,
"end": 680
} | class ____(UserAdmin):
def log_change(self, request, obj, message):
# LogEntry.user column doesn't get altered to expect a UUID, so set an
# integer manually to avoid causing an error.
original_pk = request.user.pk
request.user.pk = 1
super().log_change(request, obj, message)
request.user.pk = original_pk
site.register(get_user_model(), CustomUserAdmin)
urlpatterns = [
path("admin/", site.urls),
]
| CustomUserAdmin |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 27421,
"end": 34746
} | class ____(Base):
__tablename__ = "assessments"
assessment_id = Column(String(50), nullable=False)
"""
Assessment ID: `String` (limit 50 characters). *Primary Key* for ``assessments`` table.
"""
trace_id = Column(
String(50), ForeignKey("trace_info.request_id", ondelete="CASCADE"), nullable=False
)
"""
Trace ID that a given assessment belongs to. *Foreign Key* into ``trace_info`` table.
"""
name = Column(String(250), nullable=False)
"""
Assessment Name: `String` (limit of 250 characters).
"""
assessment_type = Column(String(50), nullable=False)
"""
Assessment type: `String` (limit 50 characters). Either "feedback" or "expectation".
"""
value = Column(Text, nullable=False)
"""
The assessment's value data stored as JSON: `Text` for the actual value content.
"""
error = Column(Text, nullable=True)
"""
AssessmentError stored as JSON: `Text` for error information (feedback only).
"""
created_timestamp = Column(BigInteger, nullable=False)
"""
The assessment's creation timestamp: `BigInteger`.
"""
last_updated_timestamp = Column(BigInteger, nullable=False)
"""
The update time of an assessment if the assessment has been updated: `BigInteger`.
"""
source_type = Column(String(50), nullable=False)
"""
Assessment source type: `String` (limit 50 characters). e.g., "HUMAN", "CODE", "LLM_JUDGE".
"""
source_id = Column(String(250), nullable=True)
"""
Assessment source ID: `String` (limit 250 characters). e.g., "evaluator@company.com".
"""
run_id = Column(String(32), nullable=True)
"""
Run ID associated with the assessment if generated due to a run event:
`String` (limit of 32 characters).
"""
span_id = Column(String(50), nullable=True)
"""
Span ID if the assessment is applied to a Span within a Trace:
`String` (limit of 50 characters).
"""
rationale = Column(Text, nullable=True)
"""
Justification for the assessment: `Text` for longer explanations.
"""
overrides = Column(String(50), nullable=True)
"""
Overridden assessment_id if an assessment is intended to update and replace an existing
assessment: `String` (limit of 50 characters).
"""
valid = Column(Boolean, nullable=False, default=True)
"""
Indicator for whether an assessment has been marked as invalid: `Boolean`. Defaults to True.
"""
assessment_metadata = Column(Text, nullable=True)
"""
Assessment metadata stored as JSON: `Text` for complex metadata structures.
"""
trace_info = relationship("SqlTraceInfo", backref=backref("assessments", cascade="all"))
"""
SQLAlchemy relationship (many:one) with
:py:class:`mlflow.store.dbmodels.models.SqlTraceInfo`.
"""
__table_args__ = (
PrimaryKeyConstraint("assessment_id", name="assessments_pkey"),
Index(f"index_{__tablename__}_trace_id_created_timestamp", "trace_id", "created_timestamp"),
Index(f"index_{__tablename__}_run_id_created_timestamp", "run_id", "created_timestamp"),
Index(f"index_{__tablename__}_last_updated_timestamp", "last_updated_timestamp"),
Index(f"index_{__tablename__}_assessment_type", "assessment_type"),
)
def to_mlflow_entity(self) -> Assessment:
"""Convert SqlAssessments to Assessment object."""
value_str = self.value
error_str = self.error
assessment_metadata_str = self.assessment_metadata
assessment_type_value = self.assessment_type
parsed_value = json.loads(value_str)
parsed_error = None
if error_str is not None:
error_dict = json.loads(error_str)
parsed_error = AssessmentError.from_dictionary(error_dict)
parsed_metadata = None
if assessment_metadata_str is not None:
parsed_metadata = json.loads(assessment_metadata_str)
source = AssessmentSource(source_type=self.source_type, source_id=self.source_id)
if assessment_type_value == "feedback":
assessment = Feedback(
name=self.name,
value=parsed_value,
error=parsed_error,
source=source,
trace_id=self.trace_id,
rationale=self.rationale,
metadata=parsed_metadata,
span_id=self.span_id,
create_time_ms=self.created_timestamp,
last_update_time_ms=self.last_updated_timestamp,
overrides=self.overrides,
valid=self.valid,
)
elif assessment_type_value == "expectation":
assessment = Expectation(
name=self.name,
value=parsed_value,
source=source,
trace_id=self.trace_id,
metadata=parsed_metadata,
span_id=self.span_id,
create_time_ms=self.created_timestamp,
last_update_time_ms=self.last_updated_timestamp,
)
assessment.overrides = self.overrides
assessment.valid = self.valid
else:
raise ValueError(f"Unknown assessment type: {assessment_type_value}")
assessment.run_id = self.run_id
assessment.assessment_id = self.assessment_id
return assessment
@classmethod
def from_mlflow_entity(cls, assessment: Assessment):
if assessment.assessment_id is None:
assessment.assessment_id = generate_assessment_id()
current_timestamp = get_current_time_millis()
if assessment.feedback is not None:
assessment_type = "feedback"
value_json = json.dumps(assessment.feedback.value)
error_json = (
json.dumps(assessment.feedback.error.to_dictionary())
if assessment.feedback.error
else None
)
elif assessment.expectation is not None:
assessment_type = "expectation"
value_json = json.dumps(assessment.expectation.value)
error_json = None
else:
raise MlflowException.invalid_parameter_value(
"Assessment must have either feedback or expectation value"
)
metadata_json = json.dumps(assessment.metadata) if assessment.metadata else None
return SqlAssessments(
assessment_id=assessment.assessment_id,
trace_id=assessment.trace_id,
name=assessment.name,
assessment_type=assessment_type,
value=value_json,
error=error_json,
created_timestamp=assessment.create_time_ms or current_timestamp,
last_updated_timestamp=assessment.last_update_time_ms or current_timestamp,
source_type=assessment.source.source_type,
source_id=assessment.source.source_id,
run_id=assessment.run_id,
span_id=assessment.span_id,
rationale=assessment.rationale,
overrides=assessment.overrides,
valid=True,
assessment_metadata=metadata_json,
)
def __repr__(self):
return f"<SqlAssessments({self.assessment_id}, {self.name}, {self.assessment_type})>"
| SqlAssessments |
python | openai__openai-python | src/openai/types/realtime/response_output_item_done_event.py | {
"start": 252,
"end": 717
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""A single item within a Realtime conversation."""
output_index: int
"""The index of the output item in the Response."""
response_id: str
"""The ID of the Response to which the item belongs."""
type: Literal["response.output_item.done"]
"""The event type, must be `response.output_item.done`."""
| ResponseOutputItemDoneEvent |
python | getsentry__sentry | src/sentry/api/authentication.py | {
"start": 24050,
"end": 26688
} | class ____(StandardAuthentication):
"""
Authentication for cross-region RPC requests.
Requests are sent with an HMAC signed by a shared private key.
"""
token_name = b"rpcsignature"
def accepts_auth(self, auth: list[bytes]) -> bool:
if not auth or len(auth) < 2:
return False
return auth[0].lower() == self.token_name
def authenticate_token(self, request: Request, token: str) -> tuple[Any, Any]:
if not compare_signature(request.path_info, request.body, token):
raise AuthenticationFailed("Invalid signature")
sentry_sdk.get_isolation_scope().set_tag("rpc_auth", True)
return (AnonymousUser(), token)
def compare_service_signature(
url: str,
body: bytes,
signature: str,
shared_secret_setting: list[str],
service_name: str,
) -> bool:
"""
Generic function to compare request data + signature signed by one of the shared secrets.
Once a key has been able to validate the signature other keys will
not be attempted. We should only have multiple keys during key rotations.
Args:
url: The request URL path
body: The request body
signature: The signature to validate
shared_secret_setting: List of shared secrets from settings
service_name: Name of the service for logging (e.g., "Seer", "Launchpad")
"""
if not shared_secret_setting:
raise RpcAuthenticationSetupException(
f"Cannot validate {service_name} RPC request signatures without shared secret"
)
# Ensure no empty secrets
if any(not secret.strip() for secret in shared_secret_setting):
raise RpcAuthenticationSetupException(
f"Cannot validate {service_name} RPC request signatures with empty shared secret"
)
if not signature.startswith("rpc0:"):
logger.error("%s RPC signature validation failed: invalid signature prefix", service_name)
return False
try:
# We aren't using the version bits currently.
_, signature_data = signature.split(":", 2)
signature_input = body
for key in shared_secret_setting:
computed = hmac.new(key.encode(), signature_input, hashlib.sha256).hexdigest()
is_valid = constant_time_compare(computed.encode(), signature_data.encode())
if is_valid:
return True
except Exception:
logger.exception("%s RPC signature validation failed", service_name)
return False
logger.error("%s RPC signature validation failed", service_name)
return False
| RpcSignatureAuthentication |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1037431,
"end": 1047304
} | class ____(DatumChannelMixin, core.ScaleDatumDef):
"""
XOffsetDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xOffset"
@overload
def bandPosition(self, _: float, /) -> XOffsetDatum: ...
@overload
def scale(self, _: Scale | None, /) -> XOffsetDatum: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> XOffsetDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> XOffsetDatum: ...
@overload
def type(self, _: Type_T, /) -> XOffsetDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
scale=scale,
title=title,
type=type,
**kwds,
)
@with_property_setters
| XOffsetDatum |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table31.py | {
"start": 315,
"end": 1358
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table31.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format(
{"bg_color": "#FFFF00", "fg_color": "#FF0000", "pattern": 6}
)
data = [
["Foo", 1234, 2000, 4321],
["Bar", 1256, 4000, 4320],
["Baz", 2234, 3000, 4332],
["Bop", 1324, 1000, 4333],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table(
"C2:F6",
{
"data": data,
"columns": [
{},
{"format": format1},
],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pikepdf__pikepdf | src/pikepdf/objects.py | {
"start": 5140,
"end": 5883
} | class ____(Object, metaclass=_ObjectMeta):
"""Construct an operator for use in a content stream.
An Operator is one of a limited set of commands that can appear in PDF content
streams (roughly the mini-language that draws objects, lines and text on a
virtual PDF canvas). The commands :func:`parse_content_stream` and
:func:`unparse_content_stream` create and expect Operators respectively, along
with their operands.
pikepdf uses the special Operator "INLINE IMAGE" to denote an inline image
in a content stream.
"""
object_type = ObjectType.operator
def __new__(cls, name: str) -> Operator:
"""Construct an operator."""
return cast('Operator', _core._new_operator(name))
| Operator |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/multiple_models/tutorial001_py39.py | {
"start": 433,
"end": 1358
} | class ____(SQLModel):
id: int
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
| HeroPublic |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/citation_query_engine.py | {
"start": 3177,
"end": 12998
} | class ____(BaseQueryEngine):
"""
Citation query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]):
A BaseSynthesizer object.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
callback_manager (Optional[CallbackManager]): A callback manager.
metadata_mode (MetadataMode): A MetadataMode object that controls how
metadata is included in the citation prompt.
"""
def __init__(
self,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
self.text_splitter = text_splitter or SentenceSplitter(
chunk_size=citation_chunk_size, chunk_overlap=citation_chunk_overlap
)
self._retriever = retriever
callback_manager = callback_manager or Settings.callback_manager
llm = llm or Settings.llm
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
callback_manager=callback_manager,
text_qa_template=CITATION_QA_TEMPLATE,
refine_template=CITATION_REFINE_TEMPLATE,
response_mode=ResponseMode.COMPACT,
use_async=False,
streaming=False,
)
self._node_postprocessors = node_postprocessors or []
self._metadata_mode = metadata_mode
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
@classmethod
def from_args(
cls,
index: BaseGPTIndex,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
citation_qa_template: BasePromptTemplate = CITATION_QA_TEMPLATE,
citation_refine_template: BasePromptTemplate = CITATION_REFINE_TEMPLATE,
retriever: Optional[BaseRetriever] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
use_async: bool = False,
streaming: bool = False,
# class-specific args
metadata_mode: MetadataMode = MetadataMode.NONE,
**kwargs: Any,
) -> "CitationQueryEngine":
"""
Initialize a CitationQueryEngine object.".
Args:
index: (BastGPTIndex): index to use for querying
llm: (Optional[LLM]): LLM object to use for response generation.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
citation_qa_template (BasePromptTemplate): Template for initial citation QA
citation_refine_template (BasePromptTemplate):
Template for citation refinement.
retriever (BaseRetriever): A retriever object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
retriever = retriever or index.as_retriever(**kwargs)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
text_qa_template=citation_qa_template,
refine_template=citation_refine_template,
response_mode=response_mode,
use_async=use_async,
streaming=streaming,
)
return cls(
retriever=retriever,
llm=llm,
response_synthesizer=response_synthesizer,
callback_manager=Settings.callback_manager,
citation_chunk_size=citation_chunk_size,
citation_chunk_overlap=citation_chunk_overlap,
text_splitter=text_splitter,
node_postprocessors=node_postprocessors,
metadata_mode=metadata_mode,
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
def _create_citation_nodes(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
"""Modify retrieved nodes to be granular sources."""
new_nodes: List[NodeWithScore] = []
for node in nodes:
text_chunks = self.text_splitter.split_text(
node.node.get_content(metadata_mode=self._metadata_mode)
)
for text_chunk in text_chunks:
text = f"Source {len(new_nodes) + 1}:\n{text_chunk}\n"
new_node = NodeWithScore(
node=TextNode.model_validate(node.node.model_dump()),
score=node.score,
)
new_node.node.set_content(text)
new_nodes.append(new_node)
return new_nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = self.retrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = await self.aretrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
| CitationQueryEngine |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 2535,
"end": 2661
} | class ____(_ColumnBlockNotRequired):
type: Literal["Column"]
items: list[Block]
width: ColumnWidth | str
| ColumnBlock |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_serverless_job.py | {
"start": 2448,
"end": 3150
} | class ____(TestEmrServerlessJobSensor):
@pytest.mark.parametrize("state", ["FAILED", "CANCELLING", "CANCELLED"])
def test_poke_raises_airflow_exception_with_specified_states(self, state):
state_details = f"mock {state}"
exception_msg = f"EMR Serverless job failed: {state_details}"
get_job_run_return_value = {"jobRun": {"state": state, "stateDetails": state_details}}
self.set_get_job_run_return_value(get_job_run_return_value)
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert exception_msg == str(ctx.value)
self.assert_get_job_run_was_called_once_with_app_and_run_id()
| TestPokeRaisesAirflowException |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 12232,
"end": 13031
} | class ____(HtmlRenderer):
"""
Renderer to display interactive figures in Azure Notebooks.
Same as NotebookRenderer but with connected=True so that the plotly.js
bundle is loaded from a CDN rather than being embedded in the notebook.
This renderer is enabled by default when running in an Azure notebook.
mime type: 'text/html'
"""
def __init__(
self, config=None, auto_play=False, post_script=None, animation_opts=None
):
super(AzureRenderer, self).__init__(
connected=True,
full_html=False,
global_init=True,
config=config,
auto_play=auto_play,
post_script=post_script,
animation_opts=animation_opts,
include_plotlyjs=False,
)
| AzureRenderer |
python | python__mypy | mypyc/analysis/attrdefined.py | {
"start": 10724,
"end": 12417
} | class ____(BaseAnalysisVisitor[str]):
"""Find attributes that may have been defined via some code path.
Consider initializations in class body and assignments to 'self.x'
and calls to base class '__init__'.
"""
def __init__(self, self_reg: Register) -> None:
self.self_reg = self_reg
def visit_branch(self, op: Branch) -> tuple[set[str], set[str]]:
return set(), set()
def visit_return(self, op: Return) -> tuple[set[str], set[str]]:
return set(), set()
def visit_unreachable(self, op: Unreachable) -> tuple[set[str], set[str]]:
return set(), set()
def visit_register_op(self, op: RegisterOp) -> tuple[set[str], set[str]]:
if isinstance(op, SetAttr) and op.obj is self.self_reg:
return {op.attr}, set()
if isinstance(op, Call) and op.fn.class_name and op.fn.name == "__init__":
return attributes_maybe_initialized_by_init_call(op), set()
return set(), set()
def visit_assign(self, op: Assign) -> tuple[set[str], set[str]]:
return set(), set()
def visit_assign_multi(self, op: AssignMulti) -> tuple[set[str], set[str]]:
return set(), set()
def visit_set_mem(self, op: SetMem) -> tuple[set[str], set[str]]:
return set(), set()
def analyze_maybe_defined_attrs_in_init(
blocks: list[BasicBlock], self_reg: Register, attrs_with_defaults: set[str], cfg: CFG
) -> AnalysisResult[str]:
return run_analysis(
blocks=blocks,
cfg=cfg,
gen_and_kill=AttributeMaybeDefinedVisitor(self_reg),
initial=attrs_with_defaults,
backward=False,
kind=MAYBE_ANALYSIS,
)
| AttributeMaybeDefinedVisitor |
python | realpython__materials | python-guitar-synthesizer/source_code_step_1/src/digitar/temporal.py | {
"start": 217,
"end": 1032
} | class ____:
seconds: Decimal
@classmethod
def from_milliseconds(cls, milliseconds: Numeric) -> Self:
return cls(Decimal(str(float(milliseconds))) / 1000)
def __init__(self, seconds: Numeric) -> None:
match seconds:
case int() | float():
object.__setattr__(self, "seconds", Decimal(str(seconds)))
case Decimal():
object.__setattr__(self, "seconds", seconds)
case Fraction():
object.__setattr__(
self, "seconds", Decimal(str(float(seconds)))
)
case _:
raise TypeError(f"unsupported type '{type(seconds).__name__}'")
def get_num_samples(self, sampling_rate: Hertz) -> int:
return round(self.seconds * round(sampling_rate))
| Time |
python | getsentry__sentry | tests/sentry/apidocs/test_extensions.py | {
"start": 931,
"end": 1130
} | class ____(Serializer):
def serialize(
self, obj: Any, attrs: Mapping[Any, Any], user: Any, **kwargs: Any
) -> BasicSerializerResponse:
raise NotImplementedError
| BasicSerializer |
python | numba__numba | numba/tests/npyufunc/test_gufunc.py | {
"start": 14786,
"end": 18511
} | class ____(MemoryLeakMixin, TestCase):
def test_pickle_gufunc_non_dyanmic(self):
"""Non-dynamic gufunc.
"""
@guvectorize(["f8,f8[:]"], "()->()")
def double(x, out):
out[:] = x * 2
# pickle
ser = pickle.dumps(double)
cloned = pickle.loads(ser)
# attributes carried over
self.assertEqual(cloned._frozen, double._frozen)
self.assertEqual(cloned.identity, double.identity)
self.assertEqual(cloned.is_dynamic, double.is_dynamic)
self.assertEqual(cloned.gufunc_builder._sigs,
double.gufunc_builder._sigs)
# expected value of attributes
self.assertTrue(cloned._frozen)
cloned.disable_compile()
self.assertTrue(cloned._frozen)
# scalar version
self.assertPreciseEqual(double(0.5), cloned(0.5))
# array version
arr = np.arange(10)
self.assertPreciseEqual(double(arr), cloned(arr))
def test_pickle_gufunc_dyanmic_null_init(self):
"""Dynamic gufunc w/o prepopulating before pickling.
"""
@guvectorize("()->()", identity=1)
def double(x, out):
out[:] = x * 2
# pickle
ser = pickle.dumps(double)
cloned = pickle.loads(ser)
# attributes carried over
self.assertEqual(cloned._frozen, double._frozen)
self.assertEqual(cloned.identity, double.identity)
self.assertEqual(cloned.is_dynamic, double.is_dynamic)
self.assertEqual(cloned.gufunc_builder._sigs,
double.gufunc_builder._sigs)
# expected value of attributes
self.assertFalse(cloned._frozen)
# scalar version
expect = np.zeros(1)
got = np.zeros(1)
double(0.5, out=expect)
cloned(0.5, out=got)
self.assertPreciseEqual(expect, got)
# array version
arr = np.arange(10)
expect = np.zeros_like(arr)
got = np.zeros_like(arr)
double(arr, out=expect)
cloned(arr, out=got)
self.assertPreciseEqual(expect, got)
def test_pickle_gufunc_dynamic_initialized(self):
"""Dynamic gufunc prepopulated before pickling.
Once unpickled, we disable compilation to verify that the gufunc
compilation state is carried over.
"""
@guvectorize("()->()", identity=1)
def double(x, out):
out[:] = x * 2
# prepopulate scalar
expect = np.zeros(1)
got = np.zeros(1)
double(0.5, out=expect)
# prepopulate array
arr = np.arange(10)
expect = np.zeros_like(arr)
got = np.zeros_like(arr)
double(arr, out=expect)
# pickle
ser = pickle.dumps(double)
cloned = pickle.loads(ser)
# attributes carried over
self.assertEqual(cloned._frozen, double._frozen)
self.assertEqual(cloned.identity, double.identity)
self.assertEqual(cloned.is_dynamic, double.is_dynamic)
self.assertEqual(cloned.gufunc_builder._sigs,
double.gufunc_builder._sigs)
# expected value of attributes
self.assertFalse(cloned._frozen)
# disable compilation
cloned.disable_compile()
self.assertTrue(cloned._frozen)
# scalar version
expect = np.zeros(1)
got = np.zeros(1)
double(0.5, out=expect)
cloned(0.5, out=got)
self.assertPreciseEqual(expect, got)
# array version
expect = np.zeros_like(arr)
got = np.zeros_like(arr)
double(arr, out=expect)
cloned(arr, out=got)
self.assertPreciseEqual(expect, got)
| TestGUVectorizePickling |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 14889,
"end": 15300
} | class ____(JsProxy, Generic[T]):
"""A double proxy created with :py:func:`create_proxy`."""
_js_type_flags = ["IS_DOUBLE_PROXY"]
def destroy(self) -> None:
"""Destroy the proxy."""
pass
def unwrap(self) -> T:
"""Unwrap a double proxy created with :py:func:`create_proxy` into the
wrapped Python object.
"""
raise NotImplementedError
| JsDoubleProxy |
python | mlflow__mlflow | tests/pytorch/iris_data_module.py | {
"start": 154,
"end": 1040
} | class ____(pl.LightningDataModule):
def __init__(self):
super().__init__()
self.columns = None
def _get_iris_as_tensor_dataset(self):
iris = load_iris()
df = iris.data
self.columns = iris.feature_names
target = iris["target"]
data = torch.Tensor(df).float()
labels = torch.Tensor(target).long()
return TensorDataset(data, labels)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
iris_full = self._get_iris_as_tensor_dataset()
self.train_set, self.val_set = random_split(iris_full, [130, 20])
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.train_set, self.test_set = random_split(self.train_set, [110, 20])
| IrisDataModuleBase |
python | mkdocs__mkdocs | mkdocs/tests/structure/file_tests.py | {
"start": 223,
"end": 34458
} | class ____(PathAssertionMixin, unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_src_path_windows(self):
f = File('foo\\a.md', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(f.src_uri, 'foo/a.md')
self.assertEqual(f.src_path, 'foo\\a.md')
f.src_uri = 'foo/b.md'
self.assertEqual(f.src_uri, 'foo/b.md')
self.assertEqual(f.src_path, 'foo\\b.md')
f.src_path = 'foo/c.md'
self.assertEqual(f.src_uri, 'foo/c.md')
self.assertEqual(f.src_path, 'foo\\c.md')
f.src_path = 'foo\\d.md'
self.assertEqual(f.src_uri, 'foo/d.md')
self.assertEqual(f.src_path, 'foo\\d.md')
f.src_uri = 'foo\\e.md'
self.assertEqual(f.src_uri, 'foo\\e.md')
self.assertEqual(f.src_path, 'foo\\e.md')
def test_sort_files(self):
self.assertEqual(
_sort_files(['b.md', 'bb.md', 'a.md', 'index.md', 'aa.md']),
['index.md', 'a.md', 'aa.md', 'b.md', 'bb.md'],
)
self.assertEqual(
_sort_files(['b.md', 'index.html', 'a.md', 'index.md']),
['index.html', 'index.md', 'a.md', 'b.md'],
)
self.assertEqual(
_sort_files(['a.md', 'index.md', 'b.md', 'index.html']),
['index.html', 'index.md', 'a.md', 'b.md'],
)
self.assertEqual(
_sort_files(['.md', '_.md', 'a.md', 'index.md', '1.md']),
['index.md', '.md', '1.md', '_.md', 'a.md'],
)
self.assertEqual(
_sort_files(['a.md', 'b.md', 'a.md']),
['a.md', 'a.md', 'b.md'],
)
self.assertEqual(
_sort_files(['A.md', 'B.md', 'README.md']),
['README.md', 'A.md', 'B.md'],
)
def test_file_sort_key(self):
for case in [
["a/b.md", "b/index.md", "b/a.md"],
["SUMMARY.md", "foo/z.md", "foo/bar/README.md", "foo/bar/index.md", "foo/bar/a.md"],
]:
with self.subTest(case):
files = [File(f, "", "", use_directory_urls=True) for f in case]
for a, b in zip(files, files[1:]):
self.assertLess(file_sort_key(a), file_sort_key(b))
def test_md_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo.md', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo.md')
if use_directory_urls:
self.assertEqual(f.dest_uri, 'foo/index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/index.html')
self.assertEqual(f.url, 'foo/')
else:
self.assertEqual(f.dest_uri, 'foo.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo.html')
self.assertEqual(f.url, 'foo.html')
self.assertEqual(f.name, 'foo')
self.assertTrue(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_md_file_nested(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/bar.md', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/bar.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/bar.md')
if use_directory_urls:
self.assertEqual(f.dest_uri, 'foo/bar/index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar/index.html')
self.assertEqual(f.url, 'foo/bar/')
else:
self.assertEqual(f.dest_uri, 'foo/bar.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.html')
self.assertEqual(f.url, 'foo/bar.html')
self.assertEqual(f.name, 'bar')
self.assertTrue(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_md_index_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('index.md', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'index.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/index.md')
self.assertEqual(f.dest_uri, 'index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/index.html')
if use_directory_urls:
self.assertEqual(f.url, './')
else:
self.assertEqual(f.url, 'index.html')
self.assertEqual(f.name, 'index')
self.assertTrue(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_md_readme_index_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('README.md', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'README.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/README.md')
self.assertEqual(f.dest_uri, 'index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/index.html')
if use_directory_urls:
self.assertEqual(f.url, './')
else:
self.assertEqual(f.url, 'index.html')
self.assertEqual(f.name, 'index')
self.assertTrue(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_md_index_file_nested(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/index.md', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/index.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/index.md')
self.assertEqual(f.dest_uri, 'foo/index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/index.html')
if use_directory_urls:
self.assertEqual(f.url, 'foo/')
else:
self.assertEqual(f.url, 'foo/index.html')
self.assertEqual(f.name, 'index')
self.assertTrue(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_static_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/bar.html', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/bar.html')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/bar.html')
self.assertEqual(f.dest_uri, 'foo/bar.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.html')
self.assertEqual(f.url, 'foo/bar.html')
self.assertEqual(f.name, 'bar')
self.assertFalse(f.is_documentation_page())
self.assertTrue(f.is_static_page())
self.assertFalse(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_media_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/bar.jpg', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/bar.jpg')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/bar.jpg')
self.assertEqual(f.dest_uri, 'foo/bar.jpg')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.jpg')
self.assertEqual(f.url, 'foo/bar.jpg')
self.assertEqual(f.name, 'bar')
self.assertFalse(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertTrue(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertFalse(f.is_css())
def test_javascript_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/bar.js', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/bar.js')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/bar.js')
self.assertEqual(f.dest_uri, 'foo/bar.js')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.js')
self.assertEqual(f.url, 'foo/bar.js')
self.assertEqual(f.name, 'bar')
self.assertFalse(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertTrue(f.is_media_file())
self.assertTrue(f.is_javascript())
self.assertFalse(f.is_css())
def test_css_file(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File('foo/bar.css', '/path/to/docs', '/path/to/site', use_directory_urls)
self.assertEqual(f.src_uri, 'foo/bar.css')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo/bar.css')
self.assertEqual(f.dest_uri, 'foo/bar.css')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.css')
self.assertEqual(f.url, 'foo/bar.css')
self.assertEqual(f.name, 'bar')
self.assertFalse(f.is_documentation_page())
self.assertFalse(f.is_static_page())
self.assertTrue(f.is_media_file())
self.assertFalse(f.is_javascript())
self.assertTrue(f.is_css())
def test_file_name_with_space(self):
f = File('foo bar.md', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(f.src_uri, 'foo bar.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo bar.md')
self.assertEqual(f.dest_uri, 'foo bar.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo bar.html')
self.assertEqual(f.url, 'foo%20bar.html')
self.assertEqual(f.name, 'foo bar')
def test_file_name_with_custom_dest_uri(self):
for use_directory_urls in True, False:
with self.subTest(use_directory_urls=use_directory_urls):
f = File(
'stuff/foo.md',
src_dir='/path/to/docs',
dest_dir='/path/to/site',
use_directory_urls=use_directory_urls,
dest_uri='stuff/1-foo/index.html',
)
self.assertEqual(f.src_uri, 'stuff/foo.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/stuff/foo.md')
self.assertEqual(f.dest_uri, 'stuff/1-foo/index.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/stuff/1-foo/index.html')
if use_directory_urls:
self.assertEqual(f.url, 'stuff/1-foo/')
else:
self.assertEqual(f.url, 'stuff/1-foo/index.html')
self.assertEqual(f.name, 'foo')
def test_file_overwrite_attrs(self):
f = File('foo.md', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(f.src_uri, 'foo.md')
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo.md')
f.abs_src_path = '/tmp/foo.md'
self.assertPathsEqual(f.abs_src_path, '/tmp/foo.md')
del f.abs_src_path
self.assertPathsEqual(f.abs_src_path, '/path/to/docs/foo.md')
f.dest_uri = 'a.html'
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/a.html')
self.assertEqual(f.url, 'a.html')
f.abs_dest_path = '/path/to/site/b.html'
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/b.html')
self.assertEqual(f.url, 'a.html')
del f.url
del f.dest_uri
del f.abs_dest_path
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo.html')
self.assertTrue(f.is_documentation_page())
f.src_uri = 'foo.html'
del f.name
self.assertFalse(f.is_documentation_page())
def test_generated_file(self):
f = File(
'foo/bar.md',
src_dir=None,
dest_dir='/path/to/site',
use_directory_urls=False,
)
f.content_string = 'вміст'
f.generated_by = 'some-plugin'
self.assertEqual(f.generated_by, 'some-plugin')
self.assertEqual(f.src_uri, 'foo/bar.md')
self.assertIsNone(f.abs_src_path)
self.assertIsNone(f.src_dir)
self.assertEqual(f.dest_uri, 'foo/bar.html')
self.assertPathsEqual(f.abs_dest_path, '/path/to/site/foo/bar.html')
self.assertEqual(f.content_string, 'вміст')
self.assertEqual(f.edit_uri, None)
@tempdir(files={'x.md': 'вміст'})
def test_generated_file_constructor(self, tdir) -> None:
config = load_config(site_dir='/path/to/site', use_directory_urls=False)
config.plugins._current_plugin = 'foo'
for f in [
File.generated(config, 'foo/bar.md', content='вміст'),
File.generated(config, 'foo/bar.md', content='вміст'.encode()),
File.generated(config, 'foo/bar.md', abs_src_path=os.path.join(tdir, 'x.md')),
]:
self.assertEqual(f.src_uri, 'foo/bar.md')
self.assertIsNone(f.src_dir)
self.assertEqual(f.dest_uri, 'foo/bar.html')
self.assertPathsEqual(f.abs_dest_path, os.path.abspath('/path/to/site/foo/bar.html'))
self.assertEqual(f.content_string, 'вміст')
self.assertEqual(f.content_bytes, 'вміст'.encode())
self.assertEqual(f.edit_uri, None)
def test_files(self):
fs = [
File('index.md', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.md', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.html', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.js', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.css', '/path/to/docs', '/path/to/site', use_directory_urls=True),
]
files = Files(fs)
self.assertEqual(list(files), fs)
self.assertEqual(len(files), 6)
self.assertEqual(files.documentation_pages(), [fs[0], fs[1]])
self.assertEqual(files.static_pages(), [fs[2]])
self.assertEqual(files.media_files(), [fs[3], fs[4], fs[5]])
self.assertEqual(files.javascript_files(), [fs[4]])
self.assertEqual(files.css_files(), [fs[5]])
self.assertEqual(files.get_file_from_path('foo/bar.jpg'), fs[3])
self.assertEqual(files.get_file_from_path('foo/bar.jpg'), fs[3])
self.assertEqual(files.get_file_from_path('missing.jpg'), None)
self.assertTrue(fs[2].src_uri in files.src_uris)
extra_file = File('extra.md', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertFalse(extra_file.src_uri in files.src_uris)
files.append(extra_file)
self.assertEqual(len(files), 7)
self.assertTrue(extra_file.src_uri in files.src_uris)
self.assertEqual(files.documentation_pages(), [fs[0], fs[1], extra_file])
files.remove(fs[1])
self.assertEqual(files.documentation_pages(), [fs[0], extra_file])
@tempdir(
files=[
'favicon.ico',
'index.md',
]
)
@tempdir(
files=[
'base.html',
'favicon.ico',
'style.css',
'foo.md',
'README',
'.ignore.txt',
'.ignore/file.txt',
'foo/.ignore.txt',
'foo/.ignore/file.txt',
]
)
def test_add_files_from_theme(self, tdir, ddir):
config = load_config(docs_dir=ddir, theme={'name': None, 'custom_dir': tdir})
env = config.theme.get_env()
files = get_files(config)
self.assertEqual(
[file.src_uri for file in files],
['index.md', 'favicon.ico'],
)
files.add_files_from_theme(env, config)
self.assertEqual(
[file.src_uri for file in files],
['index.md', 'favicon.ico', 'style.css'],
)
# Ensure theme file does not override docs_dir file
self.assertEqual(
files.get_file_from_path('favicon.ico').abs_src_path,
os.path.normpath(os.path.join(ddir, 'favicon.ico')),
)
def test_get_relative_url_use_directory_urls(self):
to_files = [
'index.md',
'foo/index.md',
'foo/bar/index.md',
'foo/bar/baz/index.md',
'foo.md',
'foo/bar.md',
'foo/bar/baz.md',
]
to_file_urls = [
'./',
'foo/',
'foo/bar/',
'foo/bar/baz/',
'foo/',
'foo/bar/',
'foo/bar/baz/',
]
from_file = File('img.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(from_file.url, 'img.jpg')
expected = [
'img.jpg', # img.jpg relative to .
'../img.jpg', # img.jpg relative to foo/
'../../img.jpg', # img.jpg relative to foo/bar/
'../../../img.jpg', # img.jpg relative to foo/bar/baz/
'../img.jpg', # img.jpg relative to foo
'../../img.jpg', # img.jpg relative to foo/bar
'../../../img.jpg', # img.jpg relative to foo/bar/baz
]
for i, filename in enumerate(to_files):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('foo/img.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(from_file.url, 'foo/img.jpg')
expected = [
'foo/img.jpg', # foo/img.jpg relative to .
'img.jpg', # foo/img.jpg relative to foo/
'../img.jpg', # foo/img.jpg relative to foo/bar/
'../../img.jpg', # foo/img.jpg relative to foo/bar/baz/
'img.jpg', # foo/img.jpg relative to foo
'../img.jpg', # foo/img.jpg relative to foo/bar
'../../img.jpg', # foo/img.jpg relative to foo/bar/baz
]
for i, filename in enumerate(to_files):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('index.html', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(from_file.url, './')
expected = [
'./', # . relative to .
'../', # . relative to foo/
'../../', # . relative to foo/bar/
'../../../', # . relative to foo/bar/baz/
'../', # . relative to foo
'../../', # . relative to foo/bar
'../../../', # . relative to foo/bar/baz
]
for i, filename in enumerate(to_files):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('file.md', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(from_file.url, 'file/')
expected = [
'file/', # file relative to .
'../file/', # file relative to foo/
'../../file/', # file relative to foo/bar/
'../../../file/', # file relative to foo/bar/baz/
'../file/', # file relative to foo
'../../file/', # file relative to foo/bar
'../../../file/', # file relative to foo/bar/baz
]
for i, filename in enumerate(to_files):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
def test_get_relative_url(self):
to_files = [
'index.md',
'foo/index.md',
'foo/bar/index.md',
'foo/bar/baz/index.md',
'foo.md',
'foo/bar.md',
'foo/bar/baz.md',
]
to_file_urls = [
'index.html',
'foo/index.html',
'foo/bar/index.html',
'foo/bar/baz/index.html',
'foo.html',
'foo/bar.html',
'foo/bar/baz.html',
]
from_file = File('img.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(from_file.url, 'img.jpg')
expected = [
'img.jpg', # img.jpg relative to .
'../img.jpg', # img.jpg relative to foo/
'../../img.jpg', # img.jpg relative to foo/bar/
'../../../img.jpg', # img.jpg relative to foo/bar/baz/
'img.jpg', # img.jpg relative to foo.html
'../img.jpg', # img.jpg relative to foo/bar.html
'../../img.jpg', # img.jpg relative to foo/bar/baz.html
]
for i, filename in enumerate(to_files):
with self.subTest(from_file=from_file.src_path, to_file=filename):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('foo/img.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(from_file.url, 'foo/img.jpg')
expected = [
'foo/img.jpg', # foo/img.jpg relative to .
'img.jpg', # foo/img.jpg relative to foo/
'../img.jpg', # foo/img.jpg relative to foo/bar/
'../../img.jpg', # foo/img.jpg relative to foo/bar/baz/
'foo/img.jpg', # foo/img.jpg relative to foo.html
'img.jpg', # foo/img.jpg relative to foo/bar.html
'../img.jpg', # foo/img.jpg relative to foo/bar/baz.html
]
for i, filename in enumerate(to_files):
with self.subTest(from_file=from_file.src_path, to_file=filename):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('index.html', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(from_file.url, 'index.html')
expected = [
'index.html', # index.html relative to .
'../index.html', # index.html relative to foo/
'../../index.html', # index.html relative to foo/bar/
'../../../index.html', # index.html relative to foo/bar/baz/
'index.html', # index.html relative to foo.html
'../index.html', # index.html relative to foo/bar.html
'../../index.html', # index.html relative to foo/bar/baz.html
]
for i, filename in enumerate(to_files):
with self.subTest(from_file=from_file.src_path, to_file=filename):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
from_file = File('file.html', '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(from_file.url, 'file.html')
expected = [
'file.html', # file.html relative to .
'../file.html', # file.html relative to foo/
'../../file.html', # file.html relative to foo/bar/
'../../../file.html', # file.html relative to foo/bar/baz/
'file.html', # file.html relative to foo.html
'../file.html', # file.html relative to foo/bar.html
'../../file.html', # file.html relative to foo/bar/baz.html
]
for i, filename in enumerate(to_files):
with self.subTest(from_file=from_file.src_path, to_file=filename):
file = File(filename, '/path/to/docs', '/path/to/site', use_directory_urls=False)
self.assertEqual(file.url, to_file_urls[i])
self.assertEqual(from_file.url_relative_to(file.url), expected[i])
self.assertEqual(from_file.url_relative_to(file), expected[i])
@tempdir(
files=[
'index.md',
'readme.md',
'bar.css',
'bar.html',
'bar.jpg',
'bar.js',
'bar.md',
'.dotfile',
'templates/foo.html',
]
)
def test_get_files(self, tdir):
config = load_config(docs_dir=tdir, extra_css=['bar.css'], extra_javascript=['bar.js'])
files = get_files(config)
self.assertIsInstance(files, Files)
self.assertEqual(
[f.src_uri for f in files if f.inclusion.is_included()],
['index.md', 'bar.css', 'bar.html', 'bar.jpg', 'bar.js', 'bar.md', 'readme.md'],
)
self.assertEqual(
[f.src_uri for f in files if f.inclusion.is_excluded()],
['.dotfile', 'templates/foo.html'],
)
@tempdir(
files=[
'README.md',
'foo.md',
]
)
def test_get_files_include_readme_without_index(self, tdir):
config = load_config(docs_dir=tdir)
files = get_files(config)
self.assertIsInstance(files, Files)
self.assertEqual([f.src_uri for f in files], ['README.md', 'foo.md'])
@tempdir(
files=[
'index.md',
'README.md',
'foo.md',
]
)
def test_get_files_exclude_readme_with_index(self, tdir):
config = load_config(docs_dir=tdir)
with self.assertLogs('mkdocs') as cm:
files = get_files(config)
self.assertRegex(
'\n'.join(cm.output),
r"^WARNING:mkdocs.structure.files:"
r"Excluding 'README.md' from the site because it conflicts with 'index.md'.$",
)
self.assertIsInstance(files, Files)
self.assertEqual([f.src_uri for f in files], ['index.md', 'foo.md'])
@tempdir()
@tempdir(files={'test.txt': 'source content'})
def test_copy_file(self, src_dir, dest_dir):
file = File('test.txt', src_dir, dest_dir, use_directory_urls=False)
dest_path = os.path.join(dest_dir, 'test.txt')
self.assertPathNotExists(dest_path)
file.copy_file()
self.assertPathIsFile(dest_path)
@tempdir(files={'test.txt': 'source content'})
def test_copy_file_same_file(self, dest_dir):
file = File('test.txt', dest_dir, dest_dir, use_directory_urls=False)
dest_path = os.path.join(dest_dir, 'test.txt')
file.copy_file()
self.assertPathIsFile(dest_path)
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'source content')
@tempdir(files={'test.txt': 'destination content'})
@tempdir(files={'test.txt': 'source content'})
def test_copy_file_clean_modified(self, src_dir, dest_dir):
file = File('test.txt', src_dir, dest_dir, use_directory_urls=False)
self.assertEqual(file.content_string, 'source content')
self.assertEqual(file.content_bytes, b'source content')
file.is_modified = mock.Mock(return_value=True)
dest_path = os.path.join(dest_dir, 'test.txt')
file.copy_file(dirty=False)
self.assertPathIsFile(dest_path)
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'source content')
@tempdir(files={'test.txt': 'destination content'})
@tempdir(files={'test.txt': 'source content'})
def test_copy_file_dirty_modified(self, src_dir, dest_dir):
file = File('test.txt', src_dir, dest_dir, use_directory_urls=False)
file.is_modified = mock.Mock(return_value=True)
dest_path = os.path.join(dest_dir, 'test.txt')
file.copy_file(dirty=True)
self.assertPathIsFile(dest_path)
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'source content')
@tempdir(files={'test.txt': 'destination content'})
@tempdir(files={'test.txt': 'source content'})
def test_copy_file_dirty_not_modified(self, src_dir, dest_dir):
file = File('test.txt', src_dir, dest_dir, use_directory_urls=False)
file.is_modified = mock.Mock(return_value=False)
dest_path = os.path.join(dest_dir, 'test.txt')
file.copy_file(dirty=True)
self.assertPathIsFile(dest_path)
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'destination content')
@tempdir()
def test_copy_file_from_content(self, dest_dir):
file = File('test.txt', src_dir='unused', dest_dir=dest_dir, use_directory_urls=False)
file.content_string = 'ö'
self.assertIsNone(file.abs_src_path)
dest_path = os.path.join(dest_dir, 'test.txt')
file.copy_file()
self.assertPathIsFile(dest_path)
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'ö')
file.content_bytes = b'\x01\x02\x03'
file.copy_file()
with open(dest_path, 'rb') as f:
self.assertEqual(f.read(), b'\x01\x02\x03')
file.content_bytes = b'\xc3\xb6'
file.copy_file()
with open(dest_path, encoding='utf-8') as f:
self.assertEqual(f.read(), 'ö')
def test_files_append_remove_src_paths(self):
fs = [
File('index.md', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.md', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.html', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.js', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('foo/bar.css', '/path/to/docs', '/path/to/site', use_directory_urls=True),
]
files = Files(fs)
self.assertEqual(len(files), 6)
self.assertEqual(len(files.src_uris), 6)
extra_file = File('extra.md', '/path/to/docs', '/path/to/site', use_directory_urls=True)
self.assertFalse(extra_file.src_uri in files.src_uris)
files.append(extra_file)
self.assertEqual(len(files), 7)
self.assertEqual(len(files.src_uris), 7)
self.assertTrue(extra_file.src_uri in files.src_uris)
files.remove(extra_file)
self.assertEqual(len(files), 6)
self.assertEqual(len(files.src_uris), 6)
self.assertFalse(extra_file.src_uri in files.src_uris)
def test_files_move_to_end(self):
fs = [
File('a.md', '/path/to/docs', '/path/to/site', use_directory_urls=True),
File('b.jpg', '/path/to/docs', '/path/to/site', use_directory_urls=True),
]
files = Files(fs)
self.assertEqual(len(files), 2)
self.assertEqual(list(files)[0].src_uri, 'a.md')
with self.assertWarns(DeprecationWarning):
files.append(fs[0])
self.assertEqual(len(files), 2)
self.assertEqual(list(files)[0].src_uri, 'b.jpg')
self.assertEqual(list(files)[1].src_uri, 'a.md')
| TestFiles |
python | huggingface__transformers | src/transformers/models/encodec/feature_extraction_encodec.py | {
"start": 953,
"end": 9877
class ____(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor.
    This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
    most of the main methods. Users should refer to this superclass for more information regarding those methods.
    Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
    [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
    Args:
        feature_size (`int`, *optional*, defaults to 1):
            The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz).
        padding_value (`float`, *optional*, defaults to 0.0):
            The value that is used to fill the padding values.
        chunk_length_s (`float`, *optional*):
            If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded.
        overlap (`float`, *optional*):
            Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formulae : `int((1.0 - self.overlap) * self.chunk_length)`.
    """
    # Names of the tensors produced by __call__.
    model_input_names = ["input_values", "padding_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples (`chunk_length_s * sampling_rate`), or `None` if chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunk starts in samples, at least 1; `None` unless both chunk_length_s and overlap are set."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).
        Args:
            raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
                The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
                `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
                (`feature_size = 2`).
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, *optional*, defaults to `False`):
                Activates truncation to cut input sequences longer than `max_length` to `max_length`.
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
        """
        # Reject audio sampled at a rate other than the one the model was trained with.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        # padding and truncation are mutually exclusive; unspecified padding defaults to True.
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )
        # Transpose each example so the time axis comes first: (channels, length) -> (length, channels).
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            # NOTE(review): only float64 is down-cast here; other dtypes pass through unchanged.
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        # When chunking is configured and no explicit max_length was given, derive a max_length
        # that is an exact whole number of strides plus one chunk.
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate to the shortest example, rounded down to a whole number of chunks.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad to the longest example, rounded up to a whole number of chunks.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                # Neither padding nor truncation: pass inputs through un-padded.
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                # Expose the attention mask under the model's expected name.
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        # Transpose back to (channels, length); mono gains an explicit channel axis.
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
# Public API of this module.
__all__ = ["EncodecFeatureExtractor"]
| EncodecFeatureExtractor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.