language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/incidents/models/test_alert_rule.py | {
"start": 11082,
"end": 11201
} | class ____(AlertRuleTriggerActionActivateBaseTest, unittest.TestCase):
method = "fire"
| AlertRuleTriggerActionFireTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI049.py | {
"start": 153,
"end": 203
} | class ____(TypedDict):
foo: bytes
| _UsedTypedDict |
python | numba__numba | numba/core/codegen.py | {
"start": 39621,
"end": 41706
} | class ____(object):
"""
For tracking unresolved symbols generated at runtime due to recursion.
"""
PREFIX = '.numba.unresolved$'
def __init__(self):
self._unresolved = utils.UniqueDict()
self._defined = set()
self._resolved = []
def scan_unresolved_symbols(self, module, engine):
"""
Scan and track all unresolved external symbols in the module and
allocate memory for it.
"""
prefix = self.PREFIX
for gv in module.global_variables:
if gv.name.startswith(prefix):
sym = gv.name[len(prefix):]
# Avoid remapping to existing GV
if engine.is_symbol_defined(gv.name):
continue
# Allocate a memory space for the pointer
abortfn = rtsys.library.get_pointer_to_function("nrt_unresolved_abort")
ptr = ctypes.c_void_p(abortfn)
engine.add_global_mapping(gv, ctypes.addressof(ptr))
self._unresolved[sym] = ptr
def scan_defined_symbols(self, module):
"""
Scan and track all defined symbols.
"""
for fn in module.functions:
if not fn.is_declaration:
self._defined.add(fn.name)
def resolve(self, engine):
"""
Fix unresolved symbols if they are defined.
"""
# An iterator to get all unresolved but available symbols
pending = [name for name in self._unresolved if name in self._defined]
# Resolve pending symbols
for name in pending:
# Get runtime address
fnptr = engine.get_function_address(name)
# Fix all usage
ptr = self._unresolved[name]
ptr.value = fnptr
self._resolved.append((name, ptr)) # keep ptr alive
# Delete resolved
del self._unresolved[name]
def _proxy(old):
@functools.wraps(old)
def wrapper(self, *args, **kwargs):
return old(self._ee, *args, **kwargs)
return wrapper
| RuntimeLinker |
python | doocs__leetcode | solution/3200-3299/3210.Find the Encrypted String/Solution.py | {
"start": 0,
"end": 202
} | class ____:
def getEncryptedString(self, s: str, k: int) -> str:
cs = list(s)
n = len(s)
for i in range(n):
cs[i] = s[(i + k) % n]
return "".join(cs)
| Solution |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 178014,
"end": 178197
} | class ____:
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
assert_(type(x[0, ...]) is np.ndarray)
| TestSubscripting |
python | pytorch__pytorch | test/test_testing.py | {
"start": 41017,
"end": 44030
} | class ____(TestCase):
def test_matching_coalesced(self):
indices = (
(0, 1),
(1, 0),
)
values = (1, 2)
actual = torch.sparse_coo_tensor(indices, values, size=(2, 2)).coalesce()
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_matching_uncoalesced(self):
indices = (
(0, 1),
(1, 0),
)
values = (1, 2)
actual = torch.sparse_coo_tensor(indices, values, size=(2, 2))
expected = actual.clone()
for fn in assert_close_with_inputs(actual, expected):
fn()
def test_mismatching_sparse_dims(self):
t = torch.randn(2, 3, 4)
actual = t.to_sparse()
expected = t.to_sparse(2)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("number of sparse dimensions in sparse COO tensors")):
fn()
def test_mismatching_nnz(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1, 1,),
(1, 0, 0,),
)
expected_values = (1, 1, 1)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("number of specified values in sparse COO tensors")):
fn()
def test_mismatching_indices_msg(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1),
(1, 1),
)
expected_values = (1, 2)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse COO indices")):
fn()
def test_mismatching_values_msg(self):
actual_indices = (
(0, 1),
(1, 0),
)
actual_values = (1, 2)
actual = torch.sparse_coo_tensor(actual_indices, actual_values, size=(2, 2))
expected_indices = (
(0, 1),
(1, 0),
)
expected_values = (1, 3)
expected = torch.sparse_coo_tensor(expected_indices, expected_values, size=(2, 2))
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, re.escape("Sparse COO values")):
fn()
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Not all sandcastle jobs support CSR testing")
| TestAssertCloseSparseCOO |
python | pytorch__pytorch | test/inductor/test_lookup_table.py | {
"start": 33483,
"end": 42104
} | class ____(BaseE2ELookupTableTest):
"""E2E tests for lookup table functionality"""
@parametrize("max_autotune", [True, False])
@fresh_cache()
def test_no_lookup_table_entry_autotune_modes(self, max_autotune):
"""Test when there's no lookup table entry with different autotune modes"""
tensors = self.create_tensors("mm")
# Setup lookup table with different key to force no match
self.setup_lookup_table(
"mm",
[
torch.randn(64, 64, device=self.device),
torch.randn(64, 64, device=self.device),
],
[],
)
# Inline validation function
def validate_choices(choices):
if max_autotune:
assert len(choices) > 2, (
f"Max-autotune should have >2 choices, got {len(choices)}"
)
assert any(isinstance(c, ExternKernelCaller) for c in choices), (
"Should have ExternKernelCaller"
)
assert any(isinstance(c, TritonTemplateCaller) for c in choices), (
"Should have TritonTemplateCaller"
)
else:
assert len(choices) == 1, (
f"No max-autotune should have 1 choice, got {len(choices)}"
)
assert isinstance(choices[0], ExternKernelCaller), (
f"Should be ExternKernelCaller, got {type(choices[0])}"
)
return choices
add_preprocessing_fn(validate_choices)
self.run_model(
"mm",
tensors,
{"max_autotune_gemm": max_autotune, "max_autotune": max_autotune},
)
@parametrize("operation", ["mm", "addmm", "bmm", "mm_plus_mm"])
@fresh_cache()
def test_valid_lookup_table_entry(self, operation):
"""Test when there's a valid entry for the operation"""
k = 256 if operation == "mm_plus_mm" else 64
tensors = self.create_tensors(operation, k=k)
# Map operation to actual template UID
template_mapping = {
"mm": torch._inductor.kernel.mm.mm_template.uid,
"addmm": torch._inductor.kernel.mm.mm_template.uid,
"bmm": torch._inductor.kernel.bmm.bmm_template.uid,
"mm_plus_mm": torch._inductor.kernel.mm_plus_mm.mm_plus_mm_template.uid,
}
template_id = template_mapping[operation]
config = self.create_basic_config(template_id)
self.setup_lookup_table(operation, tensors, [config])
add_preprocessing_fn(
partial(verify_choice_names, pattern="triton_", expected_count=1)
)
self.run_model(operation, tensors)
@unittest.skipIf(not has_triton_tma_device(), "Need TMA support")
@parametrize("operation", ["mm", "addmm"])
@fresh_cache()
def test_tma_lookup_table_entry(self, operation):
"""Test TMA template entry"""
tensors = self.create_tensors(operation)
config = self.create_basic_config(
torch._inductor.kernel.mm.persistent_tma_mm_template.uid
)
self.setup_lookup_table(operation, tensors, [config])
add_preprocessing_fn(
partial(
verify_choice_names,
pattern="triton_mm_persistent_tma_",
expected_count=1,
)
)
self.run_model(
operation, tensors, {"triton.enable_persistent_tma_matmul": True}
)
@fresh_cache()
def test_decompose_k_lookup_table_entry(self):
"""Test decompose_k template entry"""
tensors = self.create_tensors("mm", m=32, n=32, k=32 * 32)
config = self.create_basic_config(
torch._inductor.kernel.mm.decompose_k_subgraph_template.uid
)
self.setup_lookup_table("mm", tensors, [config])
add_preprocessing_fn(
partial(
verify_choice_names, pattern="decompose_k|bmm_dtype", expected_count=1
)
)
self.run_model("mm", tensors)
@fresh_cache()
def test_bias_addmm_lookup_table_entry(self):
"""Test bias_addmm template entry"""
# Create bias with stride[0] == 0 for bias_addmm eligibility
bias_unexpanded = torch.randn(64, device=self.device, dtype=torch.float16)
expanded_bias = bias_unexpanded.expand(64, 64)
tensors = [
expanded_bias,
torch.randn(64, 32, device=self.device, dtype=torch.float16),
torch.randn(32, 64, device=self.device, dtype=torch.float16),
]
config = self.create_basic_config(torch._inductor.kernel.mm.aten_bias_addmm.uid)
self.setup_lookup_table("addmm", tensors, [config])
add_preprocessing_fn(
partial(verify_choice_names, pattern="bias_addmm", expected_count=1)
)
# Run with original unexpanded bias
with inductor_config.patch(
{"max_autotune_gemm": True, "triton.autotune_cublasLt": True}
):
model = UnifiedModel("addmm")
compiled_model = torch.compile(model.to(self.device), mode="max-autotune")
compiled_model(bias_unexpanded, tensors[1], tensors[2])
@unittest.skipIf(not has_triton_tma_device(), "Need TMA support")
@fresh_cache()
def test_multiple_configs_same_template(self):
"""Test multiple configurations for same template"""
tensors = self.create_tensors("mm")
config1 = self.create_basic_config(
torch._inductor.kernel.mm.persistent_tma_mm_template.uid
)
config1.update({"BLOCK_M": 128, "BLOCK_N": 128, "num_warps": 8})
config2 = self.create_basic_config(
torch._inductor.kernel.mm.persistent_tma_mm_template.uid
)
config2.update({"BLOCK_M": 64, "BLOCK_N": 64, "num_warps": 4})
self.setup_lookup_table("mm", tensors, [config1, config2])
add_preprocessing_fn(
partial(
verify_choice_names,
pattern="triton_mm_persistent_tma_",
expected_count=2,
)
)
self.run_model("mm", tensors, {"triton.enable_persistent_tma_matmul": True})
@unittest.skipIf(not has_triton_tma_device(), "Need TMA support")
@fresh_cache()
def test_mixed_template_configs(self):
"""Test mixing different template types"""
tensors = self.create_tensors("mm")
triton_config = self.create_basic_config(
torch._inductor.kernel.mm.mm_template.uid
)
triton_config.update({"BLOCK_M": 128, "num_warps": 8})
tma_config = self.create_basic_config(
torch._inductor.kernel.mm.persistent_tma_mm_template.uid
)
tma_config.update({"BLOCK_M": 256, "num_warps": 4})
self.setup_lookup_table("mm", tensors, [triton_config, tma_config])
add_preprocessing_fn(
partial(verify_choice_names, pattern="triton_", expected_count=2)
)
self.run_model("mm", tensors, {"triton.enable_persistent_tma_matmul": True})
@fresh_cache()
def test_template_hash_filtering_e2e(self):
"""Test end-to-end template hash filtering in real MM operation"""
tensors = self.create_tensors("mm")
# Get the actual src_hash from the template
actual_hash = torch._inductor.kernel.mm.mm_template.src_hash
# Create configs - one with correct hash, one with wrong hash
correct_config = self.create_basic_config(
torch._inductor.kernel.mm.mm_template.uid
)
correct_config.update(
{"BLOCK_M": 128, "template_hash": actual_hash} # Use actual hash
)
wrong_config = self.create_basic_config(
torch._inductor.kernel.mm.mm_template.uid
)
wrong_config.update(
{
"BLOCK_M": 64,
"template_hash": "definitely_wrong_hash_12345", # Wrong hash
}
)
self.setup_lookup_table("mm", tensors, [correct_config, wrong_config])
# Should only get 1 choice since the wrong hash config gets filtered
add_preprocessing_fn(
partial(verify_choice_names, pattern="triton_", expected_count=1)
)
# Ensure hash checking is enabled
with patch.object(inductor_config.lookup_table, "check_src_hash", True):
self.run_model("mm", tensors)
if __name__ == "__main__":
from torch._inductor.utils import is_big_gpu
if HAS_GPU and HAS_CPU and is_big_gpu():
run_tests()
| TestLookupTableE2E |
python | apache__airflow | airflow-core/src/airflow/utils/file.py | {
"start": 2843,
"end": 13855
} | class ____(NamedTuple):
"""Typed namedtuple with utility functions for glob ignore rules."""
wild_match_pattern: GitWildMatchPattern
relative_to: Path | None = None
@staticmethod
def compile(pattern: str, base_dir: Path, definition_file: Path) -> _IgnoreRule | None:
"""Build an ignore rule from the supplied glob pattern and log a useful warning if it is invalid."""
relative_to: Path | None = None
if pattern.strip() == "/":
# "/" doesn't match anything in gitignore
log.warning("Ignoring no-op glob pattern '/' from %s", definition_file)
return None
if pattern.startswith("/") or "/" in pattern.rstrip("/"):
# See https://git-scm.com/docs/gitignore
# > If there is a separator at the beginning or middle (or both) of the pattern, then the
# > pattern is relative to the directory level of the particular .gitignore file itself.
# > Otherwise the pattern may also match at any level below the .gitignore level.
relative_to = definition_file.parent
ignore_pattern = GitWildMatchPattern(pattern)
return _GlobIgnoreRule(wild_match_pattern=ignore_pattern, relative_to=relative_to)
@staticmethod
def match(path: Path, rules: list[_IgnoreRule]) -> bool:
"""Match a list of ignore rules against the supplied path, accounting for exclusion rules and ordering."""
matched = False
for rule in rules:
if not isinstance(rule, _GlobIgnoreRule):
raise ValueError(f"_GlobIgnoreRule cannot match rules of type: {type(rule)}")
rel_path = str(path.relative_to(rule.relative_to) if rule.relative_to else path.name)
if (
rule.wild_match_pattern.include is not None
and rule.wild_match_pattern.match_file(rel_path) is not None
):
matched = rule.wild_match_pattern.include
return matched
ZIP_REGEX = re.compile(rf"((.*\.zip){re.escape(os.sep)})?(.*)")
@overload
def correct_maybe_zipped(fileloc: None) -> None: ...
@overload
def correct_maybe_zipped(fileloc: str | Path) -> str | Path: ...
def correct_maybe_zipped(fileloc: None | str | Path) -> None | str | Path:
"""If the path contains a folder with a .zip suffix, treat it as a zip archive and return path."""
if not fileloc:
return fileloc
search_ = ZIP_REGEX.search(str(fileloc))
if not search_:
return fileloc
_, archive, _ = search_.groups()
if archive and zipfile.is_zipfile(archive):
return archive
return fileloc
def open_maybe_zipped(fileloc, mode="r"):
"""
Open the given file.
If the path contains a folder with a .zip suffix, then the folder
is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
"""
_, archive, filename = ZIP_REGEX.search(fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return TextIOWrapper(zipfile.ZipFile(archive, mode=mode).open(filename))
return open(fileloc, mode=mode)
def _find_path_from_directory(
base_dir_path: str | os.PathLike[str],
ignore_file_name: str,
ignore_rule_type: type[_IgnoreRule],
) -> Generator[str, None, None]:
"""
Recursively search the base path and return the list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the file name containing regular expressions for files that should be ignored.
:param ignore_rule_type: the concrete class for ignore rules, which implements the _IgnoreRule interface.
:return: a generator of file paths which should not be ignored.
"""
# A Dict of patterns, keyed using resolved, absolute paths
patterns_by_dir: dict[Path, list[_IgnoreRule]] = {}
for root, dirs, files in os.walk(base_dir_path, followlinks=True):
patterns: list[_IgnoreRule] = patterns_by_dir.get(Path(root).resolve(), [])
ignore_file_path = Path(root) / ignore_file_name
if ignore_file_path.is_file():
with open(ignore_file_path) as ifile:
patterns_to_match_excluding_comments = [
re.sub(r"\s*#.*", "", line) for line in ifile.read().split("\n")
]
# append new patterns and filter out "None" objects, which are invalid patterns
patterns += [
p
for p in [
ignore_rule_type.compile(pattern, Path(base_dir_path), ignore_file_path)
for pattern in patterns_to_match_excluding_comments
if pattern
]
if p is not None
]
# evaluation order of patterns is important with negation
# so that later patterns can override earlier patterns
dirs[:] = [subdir for subdir in dirs if not ignore_rule_type.match(Path(root) / subdir, patterns)]
# explicit loop for infinite recursion detection since we are following symlinks in this walk
for sd in dirs:
dirpath = (Path(root) / sd).resolve()
if dirpath in patterns_by_dir:
raise RuntimeError(
"Detected recursive loop when walking DAG directory "
f"{base_dir_path}: {dirpath} has appeared more than once."
)
patterns_by_dir.update({dirpath: patterns.copy()})
for file in files:
if file != ignore_file_name:
abs_file_path = Path(root) / file
if not ignore_rule_type.match(abs_file_path, patterns):
yield str(abs_file_path)
def find_path_from_directory(
base_dir_path: str | os.PathLike[str],
ignore_file_name: str,
ignore_file_syntax: str = conf.get_mandatory_value("core", "DAG_IGNORE_FILE_SYNTAX", fallback="glob"),
) -> Generator[str, None, None]:
"""
Recursively search the base path for a list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the file name in which specifies the patterns of files/dirs to be ignored
:param ignore_file_syntax: the syntax of patterns in the ignore file: regexp or glob
:return: a generator of file paths.
"""
if ignore_file_syntax == "glob" or not ignore_file_syntax:
return _find_path_from_directory(base_dir_path, ignore_file_name, _GlobIgnoreRule)
if ignore_file_syntax == "regexp":
return _find_path_from_directory(base_dir_path, ignore_file_name, _RegexpIgnoreRule)
raise ValueError(f"Unsupported ignore_file_syntax: {ignore_file_syntax}")
def list_py_file_paths(
directory: str | os.PathLike[str] | None,
safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE", fallback=True),
) -> list[str]:
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:return: a list of paths to Python files in the specified directory
"""
file_paths: list[str] = []
if directory is None:
file_paths = []
elif os.path.isfile(directory):
file_paths = [str(directory)]
elif os.path.isdir(directory):
file_paths.extend(find_dag_file_paths(directory, safe_mode))
return file_paths
def find_dag_file_paths(directory: str | os.PathLike[str], safe_mode: bool) -> list[str]:
"""Find file paths of all DAG files."""
file_paths = []
for file_path in find_path_from_directory(directory, ".airflowignore"):
path = Path(file_path)
try:
if path.is_file() and (path.suffix == ".py" or zipfile.is_zipfile(path)):
if might_contain_dag(file_path, safe_mode):
file_paths.append(file_path)
except Exception:
log.exception("Error while examining %s", file_path)
return file_paths
COMMENT_PATTERN = re.compile(r"\s*#.*")
def might_contain_dag(file_path: str, safe_mode: bool, zip_file: zipfile.ZipFile | None = None) -> bool:
"""
Check whether a Python file contains Airflow DAGs.
When safe_mode is off (with False value), this function always returns True.
If might_contain_dag_callable isn't specified, it uses airflow default heuristic
"""
if not safe_mode:
return True
might_contain_dag_callable = conf.getimport(
"core",
"might_contain_dag_callable",
fallback="airflow.utils.file.might_contain_dag_via_default_heuristic",
)
return might_contain_dag_callable(file_path=file_path, zip_file=zip_file)
def might_contain_dag_via_default_heuristic(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
"""
Heuristic that guesses whether a Python file contains an Airflow DAG definition.
:param file_path: Path to the file to be checked.
:param zip_file: if passed, checks the archive. Otherwise, check local filesystem.
:return: True, if file might contain DAGs.
"""
if zip_file:
with zip_file.open(file_path) as current_file:
content = current_file.read()
else:
if zipfile.is_zipfile(file_path):
return True
with open(file_path, "rb") as dag_file:
content = dag_file.read()
content = content.lower()
if b"airflow" not in content:
return False
return any(s in content for s in (b"dag", b"asset"))
def _find_imported_modules(module: ast.Module) -> Generator[str, None, None]:
for st in module.body:
if isinstance(st, ast.Import):
for n in st.names:
yield n.name
elif isinstance(st, ast.ImportFrom) and st.module is not None:
yield st.module
def iter_airflow_imports(file_path: str) -> Generator[str, None, None]:
"""Find Airflow modules imported in the given file."""
try:
parsed = ast.parse(Path(file_path).read_bytes())
except Exception:
return
for m in _find_imported_modules(parsed):
if m.startswith("airflow."):
yield m
def get_unique_dag_module_name(file_path: str) -> str:
"""Return a unique module name in the format unusual_prefix_{sha1 of module's file path}_{original module name}."""
if isinstance(file_path, str):
path_hash = hashlib.sha1(file_path.encode("utf-8"), usedforsecurity=False).hexdigest()
org_mod_name = re.sub(r"[.-]", "_", Path(file_path).stem)
return MODIFIED_DAG_MODULE_NAME.format(path_hash=path_hash, module_name=org_mod_name)
raise ValueError("file_path should be a string to generate unique module name")
| _GlobIgnoreRule |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 105025,
"end": 105477
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseServerUserAccountsUploadOrderField),
graphql_name="field",
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| EnterpriseServerUserAccountsUploadOrder |
python | Textualize__textual | src/textual/app.py | {
"start": 6342,
"end": 6431
} | class ____(Exception):
"""Base class for exceptions relating to actions."""
| ActionError |
python | marshmallow-code__marshmallow | tests/test_serialization.py | {
"start": 694,
"end": 35621
} | class ____:
@pytest.fixture
def user(self):
return User("Foo", email="foo@bar.com", age=42)
def test_function_field_passed_func(self, user):
field = fields.Function(lambda obj: obj.name.upper())
assert field.serialize("key", user) == "FOO"
def test_function_field_passed_serialize_only_is_dump_only(self, user):
field = fields.Function(serialize=lambda obj: obj.name.upper())
assert field.dump_only is True
def test_function_field_passed_deserialize_and_serialize_is_not_dump_only(self):
field = fields.Function(
serialize=lambda val: val.lower(), deserialize=lambda val: val.upper()
)
assert field.dump_only is False
def test_function_field_passed_serialize(self, user):
field = fields.Function(serialize=lambda obj: obj.name.upper())
assert field.serialize("key", user) == "FOO"
# https://github.com/marshmallow-code/marshmallow/issues/395
def test_function_field_does_not_swallow_attribute_error(self, user):
def raise_error(obj):
raise AttributeError
field = fields.Function(serialize=raise_error)
with pytest.raises(AttributeError):
field.serialize("key", user)
def test_serialize_with_load_only_param(self):
class AliasingUserSerializer(Schema):
name = fields.String()
years = fields.Integer(load_only=True)
size = fields.Integer(dump_only=True, load_only=True)
nicknames = fields.List(fields.Str(), load_only=True)
data = {
"name": "Mick",
"years": "42",
"size": "12",
"nicknames": ["Your Majesty", "Brenda"],
}
result = AliasingUserSerializer().dump(data)
assert result["name"] == "Mick"
assert "years" not in result
assert "size" not in result
assert "nicknames" not in result
def test_function_field_load_only(self):
field = fields.Function(deserialize=lambda obj: None)
assert field.load_only
def test_function_field_passed_uncallable_object(self):
with pytest.raises(TypeError):
fields.Function("uncallable") # type: ignore[arg-type]
def test_integer_field(self, user):
field = fields.Integer()
assert field.serialize("age", user) == 42
def test_integer_as_string_field(self, user):
field = fields.Integer(as_string=True)
assert field.serialize("age", user) == "42"
def test_integer_field_default(self, user):
user.age = None
field = fields.Integer(dump_default=0)
assert field.serialize("age", user) is None
# missing
assert field.serialize("age", {}) == 0
def test_integer_field_default_set_to_none(self, user):
user.age = None
field = fields.Integer(dump_default=None)
assert field.serialize("age", user) is None
def test_uuid_field(self, user):
user.uuid1 = uuid.UUID("12345678123456781234567812345678")
user.uuid2 = None
field = fields.UUID()
assert isinstance(field.serialize("uuid1", user), str)
assert field.serialize("uuid1", user) == "12345678-1234-5678-1234-567812345678"
assert field.serialize("uuid2", user) is None
def test_ip_address_field(self, user):
ipv4_string = "192.168.0.1"
ipv6_string = "ffff::ffff"
ipv6_exploded_string = ipaddress.ip_address("ffff::ffff").exploded
user.ipv4 = ipaddress.ip_address(ipv4_string)
user.ipv6 = ipaddress.ip_address(ipv6_string)
user.empty_ip = None
field_compressed = fields.IP()
assert isinstance(field_compressed.serialize("ipv4", user), str)
assert field_compressed.serialize("ipv4", user) == ipv4_string
assert isinstance(field_compressed.serialize("ipv6", user), str)
assert field_compressed.serialize("ipv6", user) == ipv6_string
assert field_compressed.serialize("empty_ip", user) is None
field_exploded = fields.IP(exploded=True)
assert isinstance(field_exploded.serialize("ipv6", user), str)
assert field_exploded.serialize("ipv6", user) == ipv6_exploded_string
def test_ipv4_address_field(self, user):
ipv4_string = "192.168.0.1"
user.ipv4 = ipaddress.ip_address(ipv4_string)
user.empty_ip = None
field = fields.IPv4()
assert isinstance(field.serialize("ipv4", user), str)
assert field.serialize("ipv4", user) == ipv4_string
assert field.serialize("empty_ip", user) is None
def test_ipv6_address_field(self, user):
ipv6_string = "ffff::ffff"
ipv6_exploded_string = ipaddress.ip_address("ffff::ffff").exploded
user.ipv6 = ipaddress.ip_address(ipv6_string)
user.empty_ip = None
field_compressed = fields.IPv6()
assert isinstance(field_compressed.serialize("ipv6", user), str)
assert field_compressed.serialize("ipv6", user) == ipv6_string
assert field_compressed.serialize("empty_ip", user) is None
field_exploded = fields.IPv6(exploded=True)
assert isinstance(field_exploded.serialize("ipv6", user), str)
assert field_exploded.serialize("ipv6", user) == ipv6_exploded_string
def test_ip_interface_field(self, user):
ipv4interface_string = "192.168.0.1/24"
ipv6interface_string = "ffff::ffff/128"
ipv6interface_exploded_string = ipaddress.ip_interface(
"ffff::ffff/128"
).exploded
user.ipv4interface = ipaddress.ip_interface(ipv4interface_string)
user.ipv6interface = ipaddress.ip_interface(ipv6interface_string)
user.empty_ipinterface = None
field_compressed = fields.IPInterface()
assert isinstance(field_compressed.serialize("ipv4interface", user), str)
assert field_compressed.serialize("ipv4interface", user) == ipv4interface_string
assert isinstance(field_compressed.serialize("ipv6interface", user), str)
assert field_compressed.serialize("ipv6interface", user) == ipv6interface_string
assert field_compressed.serialize("empty_ipinterface", user) is None
field_exploded = fields.IPInterface(exploded=True)
assert isinstance(field_exploded.serialize("ipv6interface", user), str)
assert (
field_exploded.serialize("ipv6interface", user)
== ipv6interface_exploded_string
)
def test_ipv4_interface_field(self, user):
ipv4interface_string = "192.168.0.1/24"
user.ipv4interface = ipaddress.ip_interface(ipv4interface_string)
user.empty_ipinterface = None
field = fields.IPv4Interface()
assert isinstance(field.serialize("ipv4interface", user), str)
assert field.serialize("ipv4interface", user) == ipv4interface_string
assert field.serialize("empty_ipinterface", user) is None
def test_ipv6_interface_field(self, user):
ipv6interface_string = "ffff::ffff/128"
ipv6interface_exploded_string = ipaddress.ip_interface(
"ffff::ffff/128"
).exploded
user.ipv6interface = ipaddress.ip_interface(ipv6interface_string)
user.empty_ipinterface = None
field_compressed = fields.IPv6Interface()
assert isinstance(field_compressed.serialize("ipv6interface", user), str)
assert field_compressed.serialize("ipv6interface", user) == ipv6interface_string
assert field_compressed.serialize("empty_ipinterface", user) is None
field_exploded = fields.IPv6Interface(exploded=True)
assert isinstance(field_exploded.serialize("ipv6interface", user), str)
assert (
field_exploded.serialize("ipv6interface", user)
== ipv6interface_exploded_string
)
def test_enum_field_by_symbol_serialization(self, user):
user.sex = GenderEnum.male
field = fields.Enum(GenderEnum)
assert field.serialize("sex", user) == "male"
def test_enum_field_by_value_true_serialization(self, user):
user.hair_color = HairColorEnum.black
field = fields.Enum(HairColorEnum, by_value=True)
assert field.serialize("hair_color", user) == "black hair"
user.sex = GenderEnum.male
field2 = fields.Enum(GenderEnum, by_value=True)
assert field2.serialize("sex", user) == 1
user.some_date = DateEnum.date_1
def test_enum_field_by_value_field_serialization(self, user):
user.hair_color = HairColorEnum.black
field = fields.Enum(HairColorEnum, by_value=fields.String)
assert field.serialize("hair_color", user) == "black hair"
user.sex = GenderEnum.male
field2 = fields.Enum(GenderEnum, by_value=fields.Integer)
assert field2.serialize("sex", user) == 1
user.some_date = DateEnum.date_1
field3 = fields.Enum(DateEnum, by_value=fields.Date(format="%d/%m/%Y"))
assert field3.serialize("some_date", user) == "29/02/2004"
def test_decimal_field(self, user):
user.m1 = 12
user.m2 = "12.355"
user.m3 = decimal.Decimal(1)
user.m4 = None
field = fields.Decimal()
assert isinstance(field.serialize("m1", user), decimal.Decimal)
assert field.serialize("m1", user) == decimal.Decimal(12)
assert isinstance(field.serialize("m2", user), decimal.Decimal)
assert field.serialize("m2", user) == decimal.Decimal("12.355")
assert isinstance(field.serialize("m3", user), decimal.Decimal)
assert field.serialize("m3", user) == decimal.Decimal(1)
assert field.serialize("m4", user) is None
field = fields.Decimal(1)
assert isinstance(field.serialize("m1", user), decimal.Decimal)
assert field.serialize("m1", user) == decimal.Decimal(12)
assert isinstance(field.serialize("m2", user), decimal.Decimal)
assert field.serialize("m2", user) == decimal.Decimal("12.4")
assert isinstance(field.serialize("m3", user), decimal.Decimal)
assert field.serialize("m3", user) == decimal.Decimal(1)
assert field.serialize("m4", user) is None
field = fields.Decimal(1, decimal.ROUND_DOWN)
assert isinstance(field.serialize("m1", user), decimal.Decimal)
assert field.serialize("m1", user) == decimal.Decimal(12)
assert isinstance(field.serialize("m2", user), decimal.Decimal)
assert field.serialize("m2", user) == decimal.Decimal("12.3")
assert isinstance(field.serialize("m3", user), decimal.Decimal)
assert field.serialize("m3", user) == decimal.Decimal(1)
assert field.serialize("m4", user) is None
def test_decimal_field_string(self, user):
user.m1 = 12
user.m2 = "12.355"
user.m3 = decimal.Decimal(1)
user.m4 = None
field = fields.Decimal(as_string=True)
assert isinstance(field.serialize("m1", user), str)
assert field.serialize("m1", user) == "12"
assert isinstance(field.serialize("m2", user), str)
assert field.serialize("m2", user) == "12.355"
assert isinstance(field.serialize("m3", user), str)
assert field.serialize("m3", user) == "1"
assert field.serialize("m4", user) is None
field = fields.Decimal(1, as_string=True)
assert isinstance(field.serialize("m1", user), str)
assert field.serialize("m1", user) == "12.0"
assert isinstance(field.serialize("m2", user), str)
assert field.serialize("m2", user) == "12.4"
assert isinstance(field.serialize("m3", user), str)
assert field.serialize("m3", user) == "1.0"
assert field.serialize("m4", user) is None
field = fields.Decimal(1, decimal.ROUND_DOWN, as_string=True)
assert isinstance(field.serialize("m1", user), str)
assert field.serialize("m1", user) == "12.0"
assert isinstance(field.serialize("m2", user), str)
assert field.serialize("m2", user) == "12.3"
assert isinstance(field.serialize("m3", user), str)
assert field.serialize("m3", user) == "1.0"
assert field.serialize("m4", user) is None
def test_decimal_field_special_values(self, user):
user.m1 = "-NaN"
user.m2 = "NaN"
user.m3 = "-sNaN"
user.m4 = "sNaN"
user.m5 = "-Infinity"
user.m6 = "Infinity"
user.m7 = "-0"
field = fields.Decimal(places=2, allow_nan=True)
m1s = field.serialize("m1", user)
assert isinstance(m1s, decimal.Decimal)
assert m1s.is_qnan()
assert not m1s.is_signed()
m2s = field.serialize("m2", user)
assert isinstance(m2s, decimal.Decimal)
assert m2s.is_qnan()
assert not m2s.is_signed()
m3s = field.serialize("m3", user)
assert isinstance(m3s, decimal.Decimal)
assert m3s.is_qnan()
assert not m3s.is_signed()
m4s = field.serialize("m4", user)
assert isinstance(m4s, decimal.Decimal)
assert m4s.is_qnan()
assert not m4s.is_signed()
m5s = field.serialize("m5", user)
assert isinstance(m5s, decimal.Decimal)
assert m5s.is_infinite()
assert m5s.is_signed()
m6s = field.serialize("m6", user)
assert isinstance(m6s, decimal.Decimal)
assert m6s.is_infinite()
assert not m6s.is_signed()
m7s = field.serialize("m7", user)
assert isinstance(m7s, decimal.Decimal)
assert m7s.is_zero()
assert m7s.is_signed()
field = fields.Decimal(as_string=True, allow_nan=True)
m2s = field.serialize("m2", user)
assert isinstance(m2s, str)
assert m2s == user.m2
m5s = field.serialize("m5", user)
assert isinstance(m5s, str)
assert m5s == user.m5
m6s = field.serialize("m6", user)
assert isinstance(m6s, str)
assert m6s == user.m6
def test_decimal_field_special_values_not_permitted(self, user):
user.m7 = "-0"
field = fields.Decimal(places=2)
m7s = field.serialize("m7", user)
assert isinstance(m7s, decimal.Decimal)
assert m7s.is_zero()
assert m7s.is_signed()
def test_decimal_field_fixed_point_representation(self, user):
"""
Test we get fixed-point string representation for a Decimal number that would normally
output in engineering notation.
"""
user.m1 = "0.00000000100000000"
field = fields.Decimal()
s = field.serialize("m1", user)
assert isinstance(s, decimal.Decimal)
assert s == decimal.Decimal("1.00000000E-9")
field = fields.Decimal(as_string=True)
s = field.serialize("m1", user)
assert isinstance(s, str)
assert s == user.m1
field = fields.Decimal(as_string=True, places=2)
s = field.serialize("m1", user)
assert isinstance(s, str)
assert s == "0.00"
def test_email_field_serialize_none(self, user):
user.email = None
field = fields.Email()
assert field.serialize("email", user) is None
def test_dict_field_serialize_none(self, user):
user.various_data = None
field = fields.Dict()
assert field.serialize("various_data", user) is None
def test_dict_field_serialize(self, user):
user.various_data = {"foo": "bar"}
field = fields.Dict()
dump = field.serialize("various_data", user)
assert dump == {"foo": "bar"}
# Check dump is a distinct object
dump["foo"] = "baz"
assert user.various_data["foo"] == "bar"
def test_dict_field_serialize_ordereddict(self, user):
user.various_data = OrderedDict([("foo", "bar"), ("bar", "baz")])
field = fields.Dict()
assert field.serialize("various_data", user) == OrderedDict(
[("foo", "bar"), ("bar", "baz")]
)
def test_structured_dict_value_serialize(self, user):
user.various_data = {"foo": decimal.Decimal(1)}
field = fields.Dict(values=fields.Decimal)
assert field.serialize("various_data", user) == {"foo": 1}
def test_structured_dict_key_serialize(self, user):
user.various_data = {1: "bar"}
field = fields.Dict(keys=fields.Str)
assert field.serialize("various_data", user) == {"1": "bar"}
def test_structured_dict_key_value_serialize(self, user):
user.various_data = {1: decimal.Decimal(1)}
field = fields.Dict(keys=fields.Str, values=fields.Decimal)
assert field.serialize("various_data", user) == {"1": 1}
def test_url_field_serialize_none(self, user):
user.homepage = None
field = fields.Url()
assert field.serialize("homepage", user) is None
def test_method_field_with_method_missing(self):
class BadSerializer(Schema):
bad_field = fields.Method("invalid")
with pytest.raises(AttributeError):
BadSerializer()
def test_method_field_passed_serialize_only_is_dump_only(self, user):
field = fields.Method(serialize="method")
assert field.dump_only is True
assert field.load_only is False
def test_method_field_passed_deserialize_only_is_load_only(self):
field = fields.Method(deserialize="somemethod")
assert field.load_only is True
assert field.dump_only is False
def test_method_field_with_uncallable_attribute(self):
class BadSerializer(Schema):
foo = "not callable"
bad_field = fields.Method("foo")
with pytest.raises(TypeError):
BadSerializer()
# https://github.com/marshmallow-code/marshmallow/issues/395
def test_method_field_does_not_swallow_attribute_error(self):
class MySchema(Schema):
mfield = fields.Method("raise_error")
def raise_error(self, obj):
raise AttributeError
with pytest.raises(AttributeError):
MySchema().dump({})
def test_method_with_no_serialize_is_missing(self):
m = fields.Method()
m.parent = Schema()
assert m.serialize("", "", None) is missing_
def test_serialize_with_data_key_param(self):
class DumpToSchema(Schema):
name = fields.String(data_key="NamE")
years = fields.Integer(data_key="YearS")
data = {"name": "Richard", "years": 11}
result = DumpToSchema().dump(data)
assert result == {"NamE": "Richard", "YearS": 11}
def test_serialize_with_data_key_as_empty_string(self):
class MySchema(Schema):
name = fields.Raw(data_key="")
schema = MySchema()
assert schema.dump({"name": "Grace"}) == {"": "Grace"}
def test_serialize_with_attribute_and_data_key_uses_data_key(self):
class ConfusedDumpToAndAttributeSerializer(Schema):
name = fields.String(data_key="FullName")
username = fields.String(attribute="uname", data_key="UserName")
years = fields.Integer(attribute="le_wild_age", data_key="Years")
data = {"name": "Mick", "uname": "mick_the_awesome", "le_wild_age": 999}
result = ConfusedDumpToAndAttributeSerializer().dump(data)
assert result == {
"FullName": "Mick",
"UserName": "mick_the_awesome",
"Years": 999,
}
@pytest.mark.parametrize("fmt", ["rfc", "rfc822"])
@pytest.mark.parametrize(
("value", "expected"),
[
(dt.datetime(2013, 11, 10, 1, 23, 45), "Sun, 10 Nov 2013 01:23:45 -0000"),
(
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=dt.timezone.utc),
"Sun, 10 Nov 2013 01:23:45 +0000",
),
(
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=central),
"Sun, 10 Nov 2013 01:23:45 -0600",
),
],
)
def test_datetime_field_rfc822(self, fmt, value, expected):
field = fields.DateTime(format=fmt)
assert field.serialize("d", {"d": value}) == expected
@pytest.mark.parametrize(
("fmt", "value", "expected"),
[
("timestamp", dt.datetime(1970, 1, 1), 0),
("timestamp", dt.datetime(2013, 11, 10, 0, 23, 45), 1384043025),
(
"timestamp",
dt.datetime(2013, 11, 10, 0, 23, 45, tzinfo=dt.timezone.utc),
1384043025,
),
(
"timestamp",
dt.datetime(2013, 11, 10, 0, 23, 45, tzinfo=central),
1384064625,
),
("timestamp_ms", dt.datetime(2013, 11, 10, 0, 23, 45), 1384043025000),
(
"timestamp_ms",
dt.datetime(2013, 11, 10, 0, 23, 45, tzinfo=dt.timezone.utc),
1384043025000,
),
(
"timestamp_ms",
dt.datetime(2013, 11, 10, 0, 23, 45, tzinfo=central),
1384064625000,
),
],
)
def test_datetime_field_timestamp(self, fmt, value, expected):
field = fields.DateTime(format=fmt)
assert field.serialize("d", {"d": value}) == expected
@pytest.mark.parametrize("fmt", ["iso", "iso8601", None])
@pytest.mark.parametrize(
("value", "expected"),
[
(dt.datetime(2013, 11, 10, 1, 23, 45), "2013-11-10T01:23:45"),
(
dt.datetime(2013, 11, 10, 1, 23, 45, 123456, tzinfo=dt.timezone.utc),
"2013-11-10T01:23:45.123456+00:00",
),
(
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=dt.timezone.utc),
"2013-11-10T01:23:45+00:00",
),
(
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=central),
"2013-11-10T01:23:45-06:00",
),
],
)
def test_datetime_field_iso8601(self, fmt, value, expected):
if fmt is None:
# Test default is ISO
field = fields.DateTime()
else:
field = fields.DateTime(format=fmt)
assert field.serialize("d", {"d": value}) == expected
def test_datetime_field_format(self, user):
datetimeformat = "%Y-%m-%d"
field = fields.DateTime(format=datetimeformat)
assert field.serialize("created", user) == user.created.strftime(datetimeformat)
def test_string_field(self):
field = fields.String()
user = User(name=b"foo")
assert field.serialize("name", user) == "foo"
field = fields.String(allow_none=True)
user.name = None
assert field.serialize("name", user) is None
def test_string_field_default_to_empty_string(self, user):
field = fields.String(dump_default="")
assert field.serialize("notfound", {}) == ""
def test_time_field(self, user):
field = fields.Time()
expected = user.time_registered.isoformat()[:15]
assert field.serialize("time_registered", user) == expected
user.time_registered = None
assert field.serialize("time_registered", user) is None
@pytest.mark.parametrize("fmt", ["iso", "iso8601", None])
@pytest.mark.parametrize(
("value", "expected"),
[
(dt.time(1, 23, 45), "01:23:45"),
(dt.time(1, 23, 45, 123000), "01:23:45.123000"),
(dt.time(1, 23, 45, 123456), "01:23:45.123456"),
],
)
def test_time_field_iso8601(self, fmt, value, expected):
if fmt is None:
# Test default is ISO
field = fields.Time()
else:
field = fields.Time(format=fmt)
assert field.serialize("d", {"d": value}) == expected
def test_time_field_format(self, user):
fmt = "%H:%M:%S"
field = fields.Time(format=fmt)
assert field.serialize("birthtime", user) == user.birthtime.strftime(fmt)
def test_date_field(self, user):
field = fields.Date()
assert field.serialize("birthdate", user) == user.birthdate.isoformat()
user.birthdate = None
assert field.serialize("birthdate", user) is None
def test_timedelta_field(self, user):
user.d1 = dt.timedelta(days=1, seconds=1, microseconds=1)
user.d2 = dt.timedelta(days=0, seconds=86401, microseconds=1)
user.d3 = dt.timedelta(days=0, seconds=0, microseconds=86401000001)
user.d4 = dt.timedelta(days=0, seconds=0, microseconds=0)
user.d5 = dt.timedelta(days=-1, seconds=0, microseconds=0)
user.d6 = dt.timedelta(
days=1,
seconds=1,
microseconds=1,
milliseconds=1,
minutes=1,
hours=1,
weeks=1,
)
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d1", user) == 1.0000115740856481
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d1", user) == 86401.000001
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d1", user) == 86401000001
field = fields.TimeDelta(fields.TimeDelta.HOURS)
assert field.serialize("d1", user) == 24.000277778055555
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d2", user) == 1.0000115740856481
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d2", user) == 86401.000001
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d2", user) == 86401000001
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d3", user) == 1.0000115740856481
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d3", user) == 86401.000001
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d3", user) == 86401000001
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d4", user) == 0
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d4", user) == 0
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d4", user) == 0
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d5", user) == -1
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d5", user) == -86400
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d5", user) == -86400000000
field = fields.TimeDelta(fields.TimeDelta.WEEKS)
assert field.serialize("d6", user) == 1.1489103852529763
field = fields.TimeDelta(fields.TimeDelta.DAYS)
assert field.serialize("d6", user) == 8.042372696770833
field = fields.TimeDelta(fields.TimeDelta.HOURS)
assert field.serialize("d6", user) == 193.0169447225
field = fields.TimeDelta(fields.TimeDelta.MINUTES)
assert field.serialize("d6", user) == 11581.01668335
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d6", user) == 694861.001001
field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS)
assert field.serialize("d6", user) == 694861001.001
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
assert field.serialize("d6", user) == 694861001001
user.d7 = None
assert field.serialize("d7", user) is None
user.d8 = dt.timedelta(milliseconds=345)
field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS)
assert field.serialize("d8", user) == 345
user.d9 = dt.timedelta(milliseconds=1999)
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert field.serialize("d9", user) == 1.999
user.d10 = dt.timedelta(
weeks=1,
days=6,
hours=2,
minutes=5,
seconds=51,
milliseconds=10,
microseconds=742,
)
field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
unit_value = dt.timedelta(microseconds=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS)
unit_value = dt.timedelta(milliseconds=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
field = fields.TimeDelta(fields.TimeDelta.SECONDS)
assert math.isclose(field.serialize("d10", user), user.d10.total_seconds())
field = fields.TimeDelta(fields.TimeDelta.MINUTES)
unit_value = dt.timedelta(minutes=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
field = fields.TimeDelta(fields.TimeDelta.HOURS)
unit_value = dt.timedelta(hours=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
field = fields.TimeDelta(fields.TimeDelta.DAYS)
unit_value = dt.timedelta(days=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
field = fields.TimeDelta(fields.TimeDelta.WEEKS)
unit_value = dt.timedelta(weeks=1).total_seconds()
assert math.isclose(
field.serialize("d10", user), user.d10.total_seconds() / unit_value
)
def test_datetime_list_field(self):
obj = DateTimeList([dt.datetime.now(dt.timezone.utc), dt.datetime.now()])
field = fields.List(fields.DateTime)
result = field.serialize("dtimes", obj)
assert all(type(each) is str for each in result)
def test_list_field_serialize_none_returns_none(self):
obj = DateTimeList(None)
field = fields.List(fields.DateTime)
assert field.serialize("dtimes", obj) is None
def test_list_field_work_with_generator_single_value(self):
def custom_generator():
yield dt.datetime.now(dt.timezone.utc)
obj = DateTimeList(custom_generator())
field = fields.List(fields.DateTime)
result = field.serialize("dtimes", obj)
assert len(result) == 1
def test_list_field_work_with_generators_multiple_values(self):
def custom_generator():
yield from [dt.datetime.now(dt.timezone.utc), dt.datetime.now()]
obj = DateTimeList(custom_generator())
field = fields.List(fields.DateTime)
result = field.serialize("dtimes", obj)
assert len(result) == 2
def test_list_field_work_with_generators_empty_generator_returns_none_for_every_non_returning_yield_statement(
self,
):
def custom_generator():
yield
yield
obj = DateTimeList(custom_generator())
field = fields.List(fields.DateTime, allow_none=True)
result = field.serialize("dtimes", obj)
assert len(result) == 2
assert result[0] is None
assert result[1] is None
def test_list_field_work_with_set(self):
custom_set = {1, 2, 3}
obj = IntegerList(custom_set)
field = fields.List(fields.Int)
result = field.serialize("ints", obj)
assert len(result) == 3
assert 1 in result
assert 2 in result
assert 3 in result
def test_list_field_work_with_custom_class_with_iterator_protocol(self):
class IteratorSupportingClass:
def __init__(self, iterable):
self.iterable = iterable
def __iter__(self):
return iter(self.iterable)
ints = IteratorSupportingClass([1, 2, 3])
obj = IntegerList(ints)
field = fields.List(fields.Int)
result = field.serialize("ints", obj)
assert len(result) == 3
assert result[0] == 1
assert result[1] == 2
assert result[2] == 3
def test_bad_list_field(self):
class ASchema(Schema):
id = fields.Int()
with pytest.raises(ValueError):
fields.List("string") # type: ignore[arg-type]
expected_msg = (
"The list elements must be a subclass or instance of "
"marshmallow.fields.Field"
)
with pytest.raises(ValueError, match=expected_msg):
fields.List(ASchema) # type: ignore[arg-type]
def test_datetime_integer_tuple_field(self):
obj = DateTimeIntegerTuple((dt.datetime.now(dt.timezone.utc), 42))
field = fields.Tuple([fields.DateTime, fields.Integer])
result = field.serialize("dtime_int", obj)
assert type(result[0]) is str
assert type(result[1]) is int
def test_tuple_field_serialize_none_returns_none(self):
obj = DateTimeIntegerTuple(None)
field = fields.Tuple([fields.DateTime, fields.Integer])
assert field.serialize("dtime_int", obj) is None
def test_bad_tuple_field(self):
class ASchema(Schema):
id = fields.Int()
with pytest.raises(ValueError):
fields.Tuple(["string"]) # type: ignore[arg-type]
with pytest.raises(ValueError):
fields.Tuple(fields.String) # type: ignore[arg-type]
expected_msg = (
'Elements of "tuple_fields" must be subclasses or '
"instances of marshmallow.fields.Field."
)
with pytest.raises(ValueError, match=expected_msg):
fields.Tuple([ASchema]) # type: ignore[arg-type]
def test_serialize_does_not_apply_validators(self, user):
field = fields.Raw(validate=lambda x: False)
# No validation error raised
assert field.serialize("age", user) == user.age
def test_constant_field_serialization(self, user):
field = fields.Constant("something")
assert field.serialize("whatever", user) == "something"
def test_constant_is_always_included_in_serialized_data(self):
class MySchema(Schema):
foo = fields.Constant(42)
sch = MySchema()
assert sch.dump({"bar": 24})["foo"] == 42
assert sch.dump({"foo": 24})["foo"] == 42
def test_constant_field_serialize_when_omitted(self):
class MiniUserSchema(Schema):
name = fields.Constant("bill")
s = MiniUserSchema()
assert s.dump({})["name"] == "bill"
@pytest.mark.parametrize("FieldClass", ALL_FIELDS)
def test_all_fields_serialize_none_to_none(self, FieldClass):
field = FieldClass(allow_none=True)
res = field.serialize("foo", {"foo": None})
assert res is None
| TestFieldSerialization |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 918,
"end": 1139
} | class ____:
def test_no_files_created(self):
"""
Make sure no files were create in the destination directory.
"""
self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
| TestNoFilesCreated |
python | kubernetes-client__python | kubernetes/client/models/v1_device_claim_configuration.py | {
"start": 383,
"end": 4941
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'opaque': 'V1OpaqueDeviceConfiguration',
'requests': 'list[str]'
}
attribute_map = {
'opaque': 'opaque',
'requests': 'requests'
}
def __init__(self, opaque=None, requests=None, local_vars_configuration=None): # noqa: E501
"""V1DeviceClaimConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._opaque = None
self._requests = None
self.discriminator = None
if opaque is not None:
self.opaque = opaque
if requests is not None:
self.requests = requests
@property
def opaque(self):
"""Gets the opaque of this V1DeviceClaimConfiguration. # noqa: E501
:return: The opaque of this V1DeviceClaimConfiguration. # noqa: E501
:rtype: V1OpaqueDeviceConfiguration
"""
return self._opaque
@opaque.setter
def opaque(self, opaque):
"""Sets the opaque of this V1DeviceClaimConfiguration.
:param opaque: The opaque of this V1DeviceClaimConfiguration. # noqa: E501
:type: V1OpaqueDeviceConfiguration
"""
self._opaque = opaque
@property
def requests(self):
"""Gets the requests of this V1DeviceClaimConfiguration. # noqa: E501
Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:return: The requests of this V1DeviceClaimConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1DeviceClaimConfiguration.
Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:param requests: The requests of this V1DeviceClaimConfiguration. # noqa: E501
:type: list[str]
"""
self._requests = requests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeviceClaimConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeviceClaimConfiguration):
return True
return self.to_dict() != other.to_dict()
| V1DeviceClaimConfiguration |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/hopper_v5.py | {
"start": 305,
"end": 19804
} | class ____(MujocoEnv, utils.EzPickle):
r"""
## Description
This environment is based on the work of Erez, Tassa, and Todorov in ["Infinite Horizon Model Predictive Control for Nonlinear Periodic Tasks"](http://www.roboticsproceedings.org/rss07/p10.pdf).
The environment aims to increase the number of independent state and control variables compared to classical control environments.
The hopper is a two-dimensional one-legged figure consisting of four main body parts - the torso at the top, the thigh in the middle, the leg at the bottom, and a single foot on which the entire body rests.
The goal is to make hops that move in the forward (right) direction by applying torque to the three hinges that connect the four body parts.
## Action Space
```{figure} action_space_figures/hopper.png
:name: hopper
```
The action space is a `Box(-1, 1, (3,), float32)`. An action represents the torques applied at the hinge joints.
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Type (Unit) |
|-----|------------------------------------|-------------|-------------|----------------------------------|-------|--------------|
| 0 | Torque applied on the thigh rotor | -1 | 1 | thigh_joint | hinge | torque (N m) |
| 1 | Torque applied on the leg rotor | -1 | 1 | leg_joint | hinge | torque (N m) |
| 2 | Torque applied on the foot rotor | -1 | 1 | foot_joint | hinge | torque (N m) |
## Observation Space
The observation space consists of the following parts (in order):
- *qpos (5 elements by default):* Position values of the robot's body parts.
- *qvel (6 elements):* The velocities of these individual body parts (their derivatives).
By default, the observation does not include the robot's x-coordinate (`rootx`).
This can be included by passing `exclude_current_positions_from_observation=False` during construction.
In this case, the observation space will be a `Box(-Inf, Inf, (12,), float64)`, where the first observation element is the x-coordinate of the robot.
Regardless of whether `exclude_current_positions_from_observation` is set to `True` or `False`, the x- and y-coordinates are returned in `info` with the keys `"x_position"` and `"y_position"`, respectively.
By default, however, the observation space is a `Box(-Inf, Inf, (11,), float64)` where the elements are as follows:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Type (Unit) |
| --- | -------------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ |
| 0 | z-coordinate of the torso (height of hopper) | -Inf | Inf | rootz | slide | position (m) |
| 1 | angle of the torso | -Inf | Inf | rooty | hinge | angle (rad) |
| 2 | angle of the thigh joint | -Inf | Inf | thigh_joint | hinge | angle (rad) |
| 3 | angle of the leg joint | -Inf | Inf | leg_joint | hinge | angle (rad) |
| 4 | angle of the foot joint | -Inf | Inf | foot_joint | hinge | angle (rad) |
| 5 | velocity of the x-coordinate of the torso | -Inf | Inf | rootx | slide | velocity (m/s) |
| 6 | velocity of the z-coordinate (height) of the torso | -Inf | Inf | rootz | slide | velocity (m/s) |
| 7 | angular velocity of the angle of the torso | -Inf | Inf | rooty | hinge | angular velocity (rad/s) |
| 8 | angular velocity of the thigh hinge | -Inf | Inf | thigh_joint | hinge | angular velocity (rad/s) |
| 9 | angular velocity of the leg hinge | -Inf | Inf | leg_joint | hinge | angular velocity (rad/s) |
| 10 | angular velocity of the foot hinge | -Inf | Inf | foot_joint | hinge | angular velocity (rad/s) |
| excluded | x-coordinate of the torso | -Inf | Inf | rootx | slide | position (m) |
## Rewards
The total reward is: ***reward*** *=* *healthy_reward + forward_reward - ctrl_cost*.
- *healthy_reward*:
Every timestep that the Hopper is healthy (see definition in section "Episode End"),
it gets a reward of fixed value `healthy_reward` (default is $1$).
- *forward_reward*:
A reward for moving forward,
this reward would be positive if the Hopper moves forward (in the positive $x$ direction / in the right direction).
$w_{forward} \times \frac{dx}{dt}$, where
$dx$ is the displacement of the "torso" ($x_{after-action} - x_{before-action}$),
$dt$ is the time between actions, which depends on the `frame_skip` parameter (default is $4$),
and `frametime` which is $0.002$ - so the default is $dt = 4 \times 0.002 = 0.008$,
$w_{forward}$ is the `forward_reward_weight` (default is $1$).
- *ctrl_cost*:
A negative reward to penalize the Hopper for taking actions that are too large.
$w_{control} \times \|action\|_2^2$,
where $w_{control}$ is `ctrl_cost_weight` (default is $10^{-3}$).
`info` contains the individual reward terms.
## Starting State
The initial position state is $[0, 1.25, 0, 0, 0, 0] + \mathcal{U}_{[-reset\_noise\_scale \times I_{6}, reset\_noise\_scale \times I_{6}]}$.
The initial velocity state is $\mathcal{U}_{[-reset\_noise\_scale \times I_{6}, reset\_noise\_scale \times I_{6}]}$.
where $\mathcal{U}$ is the multivariate uniform continuous distribution.
Note that the z-coordinate is non-zero so that the hopper can stand up immediately.
## Episode End
### Termination
If `terminate_when_unhealthy is True` (the default), the environment terminates when the Hopper is unhealthy.
The Hopper is unhealthy if any of the following happens:
1. An element of `observation[1:]` (if `exclude_current_positions_from_observation=True`, otherwise `observation[2:]`) is no longer contained in the closed interval specified by the `healthy_state_range` argument (default is $[-100, 100]$).
2. The height of the hopper (`observation[0]` if `exclude_current_positions_from_observation=True`, otherwise `observation[1]`) is no longer contained in the closed interval specified by the `healthy_z_range` argument (default is $[0.7, +\infty]$) (usually meaning that it has fallen).
3. The angle of the torso (`observation[1]` if `exclude_current_positions_from_observation=True`, otherwise `observation[2]`) is no longer contained in the closed interval specified by the `healthy_angle_range` argument (default is $[-0.2, 0.2]$).
### Truncation
The default duration of an episode is 1000 timesteps.
## Arguments
Hopper provides a range of parameters to modify the observation space, reward function, initial state, and termination condition.
These parameters can be applied during `gymnasium.make` in the following way:
```python
import gymnasium as gym
env = gym.make('Hopper-v5', ctrl_cost_weight=1e-3, ....)
```
| Parameter | Type | Default | Description |
| -------------------------------------------- | --------- | --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `xml_file` | **str** | `"hopper.xml"` | Path to a MuJoCo model |
| `forward_reward_weight` | **float** | `1` | Weight for _forward_reward_ term (see `Rewards` section) |
| `ctrl_cost_weight` | **float** | `1e-3` | Weight for _ctrl_cost_ reward (see `Rewards` section) |
| `healthy_reward` | **float** | `1` | Weight for _healthy_reward_ reward (see `Rewards` section) |
| `terminate_when_unhealthy` | **bool** | `True` | If `True`, issue a `terminated` signal is unhealthy (see `Episode End` section) |
| `healthy_state_range` | **tuple** | `(-100, 100)` | The elements of `observation[1:]` (if `exclude_current_positions_from_observation=True`, else `observation[2:]`) must be in this range for the hopper to be considered healthy (see `Episode End` section) |
| `healthy_z_range` | **tuple** | `(0.7, float("inf"))` | The z-coordinate must be in this range for the hopper to be considered healthy (see `Episode End` section) |
| `healthy_angle_range` | **tuple** | `(-0.2, 0.2)` | The angle given by `observation[1]` (if `exclude_current_positions_from_observation=True`, else `observation[2]`) must be in this range for the hopper to be considered healthy (see `Episode End` section) |
| `reset_noise_scale` | **float** | `5e-3` | Scale of random perturbations of initial position and velocity (see `Starting State` section) |
| `exclude_current_positions_from_observation` | **bool** | `True` | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies(see `Observation Space` section) |
## Version History
* v5:
- Minimum `mujoco` version is now 2.3.3.
- Added support for fully custom/third party `mujoco` models using the `xml_file` argument (previously only a few changes could be made to the existing models).
- Added `default_camera_config` argument, a dictionary for setting the `mj_camera` properties, mainly useful for custom environments.
- Added `env.observation_structure`, a dictionary for specifying the observation space compose (e.g. `qpos`, `qvel`), useful for building tooling and wrappers for the MuJoCo environments.
- Return a non-empty `info` with `reset()`, previously an empty dictionary was returned, the new keys are the same state information as `step()`.
- Added `frame_skip` argument, used to configure the `dt` (duration of `step()`), default varies by environment check environment documentation pages.
- Fixed bug: `healthy_reward` was given on every step (even if the Hopper was unhealthy), now it is only given when the Hopper is healthy. The `info["reward_survive"]` is updated with this change (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/526)).
- Restored the `xml_file` argument (was removed in `v4`).
- Added individual reward terms in `info` (`info["reward_forward"]`, `info["reward_ctrl"]`, `info["reward_survive"]`).
- Added `info["z_distance_from_origin"]` which is equal to the vertical distance of the "torso" body from its initial position.
* v4: All MuJoCo environments now use the MuJoCo bindings in mujoco >= 2.1.3.
* v3: Support for `gymnasium.make` kwargs such as `xml_file`, `ctrl_cost_weight`, `reset_noise_scale`, etc. rgb rendering comes from tracking camera (so agent does not run away from screen). Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v2: All continuous control environments now use mujoco-py >= 1.50. Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
* v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
* v0: Initial versions release.
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
}
def __init__(
self,
xml_file: str = "hopper.xml",
frame_skip: int = 4,
default_camera_config: dict[str, float | int] = DEFAULT_CAMERA_CONFIG,
forward_reward_weight: float = 1.0,
ctrl_cost_weight: float = 1e-3,
healthy_reward: float = 1.0,
terminate_when_unhealthy: bool = True,
healthy_state_range: tuple[float, float] = (-100.0, 100.0),
healthy_z_range: tuple[float, float] = (0.7, float("inf")),
healthy_angle_range: tuple[float, float] = (-0.2, 0.2),
reset_noise_scale: float = 5e-3,
exclude_current_positions_from_observation: bool = True,
**kwargs,
):
utils.EzPickle.__init__(
self,
xml_file,
frame_skip,
default_camera_config,
forward_reward_weight,
ctrl_cost_weight,
healthy_reward,
terminate_when_unhealthy,
healthy_state_range,
healthy_z_range,
healthy_angle_range,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs,
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_state_range = healthy_state_range
self._healthy_z_range = healthy_z_range
self._healthy_angle_range = healthy_angle_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
MujocoEnv.__init__(
self,
xml_file,
frame_skip,
observation_space=None,
default_camera_config=default_camera_config,
**kwargs,
)
self.metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": int(np.round(1.0 / self.dt)),
}
obs_size = (
self.data.qpos.size
+ self.data.qvel.size
- exclude_current_positions_from_observation
)
self.observation_space = Box(
low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
)
self.observation_structure = {
"skipped_qpos": 1 * exclude_current_positions_from_observation,
"qpos": self.data.qpos.size
- 1 * exclude_current_positions_from_observation,
"qvel": self.data.qvel.size,
}
@property
def healthy_reward(self):
return self.is_healthy * self._healthy_reward
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
z, angle = self.data.qpos[1:3]
state = self.state_vector()[2:]
min_state, max_state = self._healthy_state_range
min_z, max_z = self._healthy_z_range
min_angle, max_angle = self._healthy_angle_range
healthy_state = np.all(np.logical_and(min_state < state, state < max_state))
healthy_z = min_z < z < max_z
healthy_angle = min_angle < angle < max_angle
is_healthy = all((healthy_state, healthy_z, healthy_angle))
return is_healthy
def _get_obs(self):
position = self.data.qpos.flatten()
velocity = np.clip(self.data.qvel.flatten(), -10, 10)
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def step(self, action):
x_position_before = self.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
observation = self._get_obs()
reward, reward_info = self._get_rew(x_velocity, action)
terminated = (not self.is_healthy) and self._terminate_when_unhealthy
info = {
"x_position": x_position_after,
"z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
"x_velocity": x_velocity,
**reward_info,
}
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return observation, reward, terminated, False, info
def _get_rew(self, x_velocity: float, action):
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
ctrl_cost = self.control_cost(action)
costs = ctrl_cost
reward = rewards - costs
reward_info = {
"reward_forward": forward_reward,
"reward_ctrl": -ctrl_cost,
"reward_survive": healthy_reward,
}
return reward, reward_info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
def _get_reset_info(self):
return {
"x_position": self.data.qpos[0],
"z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
}
| HopperEnv |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 5192,
"end": 6982
} | class ____(enum.Enum):
#: Global memory.
GMEM = "gmem"
#: Shared memory.
SMEM = "smem"
#: Tensor memory. New addition to Blackwell. Not available on Hopper.
TMEM = "tmem"
#: Registers.
REGS = "regs"
def __str__(self) -> str:
return self.value
def __call__(
self,
shape: Sequence[int],
dtype: jnp.dtype,
*,
transforms: Sequence[MemoryRefTransform] = (),
packed: bool | None = None,
collective: bool | None = None,
layout: TMEMLayout | None = None,
) -> pallas_core.MemoryRef:
shape = tuple(shape)
# TODO(sharadmv): Add HiType constructor support.
if self == MemorySpace.TMEM:
if transforms:
raise ValueError("transforms are not supported for TMEM")
if collective is None:
collective = False
if layout is None:
if packed is None:
if dtypes.itemsize_bits(dtype) != 32:
raise ValueError(
"dtypes narrower than 32-bit require either the packed argument"
" or an explicit TMEM layout"
)
packed = False
mgpu_layout = infer_tmem_layout(
shape, dtype, packed=packed, collective=collective
)
else:
if packed is not None:
raise ValueError("packed cannot be specified if layout is specified.")
mgpu_layout = layout.to_mgpu()
else:
if packed is not None or collective is not None or layout is not None:
raise ValueError("packed, collective and layout arguments are only supported for TMEM.")
mgpu_layout = None
return GPUMemoryRef(jax_core.ShapedArray(shape, dtype), memory_space=self,
transforms=transforms, layout=mgpu_layout,
collective=collective)
| MemorySpace |
python | doocs__leetcode | solution/2800-2899/2830.Maximize the Profit as the Salesman/Solution.py | {
"start": 0,
"end": 348
} | class ____:
def maximizeTheProfit(self, n: int, offers: List[List[int]]) -> int:
offers.sort(key=lambda x: x[1])
f = [0] * (len(offers) + 1)
g = [x[1] for x in offers]
for i, (s, _, v) in enumerate(offers, 1):
j = bisect_left(g, s)
f[i] = max(f[i - 1], f[j] + v)
return f[-1]
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/presto_to_gcs.py | {
"start": 4957,
"end": 7193
} | class ____(BaseSQLToGCSOperator):
"""
Copy data from PrestoDB to Google Cloud Storage in JSON, CSV or Parquet format.
:param presto_conn_id: Reference to a specific Presto hook.
"""
ui_color = "#a0e08c"
type_map = {
"BOOLEAN": "BOOL",
"TINYINT": "INT64",
"SMALLINT": "INT64",
"INTEGER": "INT64",
"BIGINT": "INT64",
"REAL": "FLOAT64",
"DOUBLE": "FLOAT64",
"DECIMAL": "NUMERIC",
"VARCHAR": "STRING",
"CHAR": "STRING",
"VARBINARY": "BYTES",
"JSON": "STRING",
"DATE": "DATE",
"TIME": "TIME",
# BigQuery don't time with timezone native.
"TIME WITH TIME ZONE": "STRING",
"TIMESTAMP": "TIMESTAMP",
# BigQuery supports a narrow range of time zones during import.
# You should use TIMESTAMP function, if you want have TIMESTAMP type
"TIMESTAMP WITH TIME ZONE": "STRING",
"IPADDRESS": "STRING",
"UUID": "STRING",
}
def __init__(self, *, presto_conn_id: str = "presto_default", **kwargs):
super().__init__(**kwargs)
self.presto_conn_id = presto_conn_id
def query(self):
"""Query presto and returns a cursor to the results."""
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
conn = presto.get_conn()
cursor = conn.cursor()
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return _PrestoToGCSPrestoCursorAdapter(cursor)
def field_to_bigquery(self, field) -> dict[str, str]:
"""Convert presto field type to BigQuery field type."""
clear_field_type = field[1].upper()
# remove type argument e.g. DECIMAL(2, 10) => DECIMAL
clear_field_type, _, _ = clear_field_type.partition("(")
new_field_type = self.type_map.get(clear_field_type, "STRING")
return {"name": field[0], "type": new_field_type}
def convert_type(self, value, schema_type, **kwargs):
"""
Do nothing. Presto uses JSON on the transport layer, so types are simple.
:param value: Presto column value
:param schema_type: BigQuery data type
"""
return value
| PrestoToGCSOperator |
python | mlflow__mlflow | tests/store/tracking/__init__.py | {
"start": 155,
"end": 2562
} | class ____:
def create_test_run(self):
raise Exception("this should be overridden")
def get_store(self):
raise Exception("this should be overridden")
def test_record_logged_model(self):
store = self.get_store()
run_id = self.create_test_run().info.run_id
m = Model(artifact_path="model/path", run_id=run_id, flavors={"tf": "flavor body"})
store.record_logged_model(run_id, m)
self._verify_logged(
store,
run_id=run_id,
params=[],
metrics=[],
tags=[RunTag(MLFLOW_LOGGED_MODELS, json.dumps([m.to_dict()]))],
)
m2 = Model(
artifact_path="some/other/path", run_id=run_id, flavors={"R": {"property": "value"}}
)
store.record_logged_model(run_id, m2)
self._verify_logged(
store,
run_id,
params=[],
metrics=[],
tags=[RunTag(MLFLOW_LOGGED_MODELS, json.dumps([m.to_dict(), m2.to_dict()]))],
)
m3 = Model(
artifact_path="some/other/path2", run_id=run_id, flavors={"R2": {"property": "value"}}
)
store.record_logged_model(run_id, m3)
self._verify_logged(
store,
run_id,
params=[],
metrics=[],
tags=[
RunTag(MLFLOW_LOGGED_MODELS, json.dumps([m.to_dict(), m2.to_dict(), m3.to_dict()]))
],
)
with pytest.raises(
TypeError,
match="Argument 'mlflow_model' should be mlflow.models.Model, got '<class 'dict'>'",
):
store.record_logged_model(run_id, m.to_dict())
@staticmethod
def _verify_logged(store, run_id, metrics, params, tags):
run = store.get_run(run_id)
all_metrics = sum((store.get_metric_history(run_id, key) for key in run.data.metrics), [])
assert len(all_metrics) == len(metrics)
logged_metrics = [(m.key, m.value, m.timestamp, m.step) for m in all_metrics]
assert set(logged_metrics) == {(m.key, m.value, m.timestamp, m.step) for m in metrics}
logged_tags = set(run.data.tags.items())
assert {(tag.key, tag.value) for tag in tags} <= logged_tags
assert len(run.data.params) == len(params)
assert set(run.data.params.items()) == {(param.key, param.value) for param in params}
| AbstractStoreTest |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 31703,
"end": 39707
} | class ____(BaseConfigHeuristic):
"""
Child class for CUDA device specific gemm/flex attention/conv/ configs.
"""
def __init__(self) -> None:
super().__init__()
self.sm_120_default_flex_config = {
(torch.float32, 64): FlexConfig(128, 32, 2, 4),
(torch.float32, 128): FlexConfig(128, 32, 2, 4),
(torch.float32, 256): FlexConfig(64, 16, 2, 4),
(torch.bfloat16, 64): FlexConfig(128, 64, 2, 4),
(torch.bfloat16, 128): FlexConfig(128, 64, 2, 8),
(torch.bfloat16, 256): FlexConfig(32, 64, 2, 4),
(torch.float16, 64): FlexConfig(128, 64, 2, 4),
(torch.float16, 128): FlexConfig(128, 64, 2, 8),
(torch.float16, 256): FlexConfig(32, 64, 2, 4),
}
self.sm_100_default_flex_config = {
(torch.float32, 64): FlexConfig(128, 32, 3, 4),
(torch.float32, 128): FlexConfig(32, 64, 3, 4),
(torch.float32, 192): FlexConfig(32, 64, 2, 4),
(torch.float32, 256): FlexConfig(32, 32, 3, 4),
(torch.bfloat16, 64): FlexConfig(128, 128, 3, 4),
(torch.bfloat16, 128): FlexConfig(128, 64, 3, 8),
(torch.bfloat16, 192): FlexConfig(128, 128, 1, 8),
(torch.bfloat16, 256): FlexConfig(64, 32, 3, 4),
(torch.float16, 64): FlexConfig(128, 128, 3, 4),
(torch.float16, 128): FlexConfig(128, 64, 3, 8),
(torch.float16, 192): FlexConfig(128, 128, 1, 8),
(torch.float16, 256): FlexConfig(64, 32, 3, 4),
}
self.h100_default_flex_config = {
(torch.float32, 64): FlexConfig(128, 32, 3, 4),
(torch.float32, 128): FlexConfig(32, 64, 3, 4),
(torch.float32, 256): FlexConfig(32, 32, 3, 4),
(torch.bfloat16, 64): FlexConfig(128, 128, 3, 4),
(torch.bfloat16, 128): FlexConfig(128, 64, 3, 8),
(torch.bfloat16, 256): FlexConfig(64, 32, 3, 4),
(torch.float16, 64): FlexConfig(128, 128, 3, 4),
(torch.float16, 128): FlexConfig(128, 64, 3, 8),
(torch.float16, 256): FlexConfig(64, 32, 3, 4),
}
self.a100_default_flex_config = {
(torch.float32, 64): FlexConfig(128, 32, 3, 4),
(torch.float32, 128): FlexConfig(128, 32, 3, 4),
(torch.float32, 256): FlexConfig(64, 16, 3, 4),
(torch.bfloat16, 64): FlexConfig(128, 64, 3, 4),
(torch.bfloat16, 128): FlexConfig(128, 64, 3, 8),
(torch.bfloat16, 256): FlexConfig(32, 64, 3, 4),
(torch.float16, 64): FlexConfig(128, 64, 3, 4),
(torch.float16, 128): FlexConfig(128, 64, 3, 8),
(torch.float16, 256): FlexConfig(32, 64, 3, 4),
}
# Overwriting the configs omitting BLOCK_N of size 128 that cause ULFs
self.flex_attn_bwd_autotune_configs: list[FlexBwDConfig] = [
# See Note: flex bwd configs
FlexBwDConfig(BLOCK_M, BLOCK_N, BLOCK_N, BLOCK_M, s, 4)
for BLOCK_M in [32, 64]
for BLOCK_N in [32, 64]
for s in [1, 3, 4, 5] # num_stages
if BLOCK_N % BLOCK_M == 0
]
def get_flex_attn_fwd_configs(self, head_dim: int, dtype: Any) -> list[FlexConfig]:
capability = torch.cuda.get_device_capability()
flex_attn_fwd_configs: list[FlexConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_fwd_configs
flex_attn_fwd_configs += self.flex_attn_fwd_autotune_configs
if head_dim <= 256:
if dtype == torch.float32:
default_config = FlexConfig(64, 64, 3, 4)
else:
default_config = FlexConfig(128, 64, 3, 4)
if capability >= (12, 0):
default_config = self.sm_120_default_flex_config.get(
(dtype, head_dim), default_config
)
elif capability >= (10, 0):
default_config = self.sm_100_default_flex_config.get(
(dtype, head_dim), default_config
)
elif capability == (9, 0):
default_config = self.h100_default_flex_config.get(
(dtype, head_dim), default_config
)
elif capability >= (8, 0):
default_config = self.a100_default_flex_config.get(
(dtype, head_dim), default_config
)
else:
if dtype == torch.float32:
default_config = FlexConfig(32, 16, 3, 4)
else:
default_config = FlexConfig(64, 32, 3, 4)
if default_config not in flex_attn_fwd_configs:
flex_attn_fwd_configs.append(default_config)
return flex_attn_fwd_configs
def get_flex_attn_bwd_configs(
self, head_dim: int, dtype: Any
) -> list[FlexBwDConfig]:
capability = torch.cuda.get_device_capability()
flex_attn_bwd_configs: list[FlexBwDConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_attn_bwd_configs
flex_attn_bwd_configs += self.flex_attn_bwd_autotune_configs
major, minor = capability
if dtype == torch.float32:
capability_class = "float32"
elif major >= 10:
capability_class = "sm10x"
elif capability == (9, 0):
capability_class = "sm90"
elif major >= 8:
capability_class = "sm8x"
else:
capability_class = "baseline"
# fmt: off
config_map = {
"float32": lambda h: FlexBwDConfig(16, 16, 16, 16, 1, 4),
"baseline": lambda h: FlexBwDConfig(16, 16, 16, 16, 1, 4),
"sm90": lambda h: (
FlexBwDConfig(64, 64, 64, 64, 3, 4) if h < 64 else
FlexBwDConfig(64, 128, 128, 64, 3, 8) if h <= 128 else
FlexBwDConfig(64, 64, 64, 64, 2, 4)
),
"sm10x": lambda h: (
FlexBwDConfig(64, 128, 128, 64, 3, 4) if h <= 128 else
FlexBwDConfig(64, 64, 64, 64, 1, 8) if h <= 192 else
FlexBwDConfig(64, 64, 64, 64, 1, 4)
),
"sm8x": lambda h: (
FlexBwDConfig(32, 128, 128, 32, 3, 4)
if h < 64
else FlexBwDConfig(
64, 64, 64, 64, 3 if minor == 6 and h == 128 else 2, 4
)
),
}
# fmt: on
if head_dim <= 256:
default_config = config_map[capability_class](head_dim)
else:
default_config = FlexBwDConfig(16, 16, 16, 16, 1, 4)
if default_config not in flex_attn_bwd_configs:
flex_attn_bwd_configs.append(default_config)
return flex_attn_bwd_configs
def get_flex_decode_configs(
self, head_dim: int, dtype: Any
) -> list[FlexDecodeConfig]:
capability = torch.cuda.get_device_capability()
default_config = FlexDecodeConfig(64, 1, 2)
flex_decode_configs: list[FlexDecodeConfig] = []
if config.max_autotune:
if config.max_autotune_flex_search_space == "EXHAUSTIVE":
return self.exhaustive_flex_decode_configs
flex_decode_configs += self.flex_decode_autotune_configs
if capability in [(9, 0), (10, 0), (10, 3)]: # sm_90, sm_100, sm_103
if head_dim > 128 and dtype == torch.float32:
default_config = FlexDecodeConfig(64, 1, 2)
else:
default_config = FlexDecodeConfig(64, 3, 2)
else:
default_config = FlexDecodeConfig(64, 1, 2)
if default_config not in flex_decode_configs:
flex_decode_configs.append(default_config)
return flex_decode_configs
| CUDAConfigHeuristic |
python | aio-libs__aiohttp | examples/token_refresh_middleware.py | {
"start": 4203,
"end": 12264
} | class ____:
"""Test server with JWT-like token authentication."""
def __init__(self) -> None:
self.tokens_db: dict[str, dict[str, str | float]] = {}
self.refresh_tokens_db: dict[str, dict[str, str | float]] = {
# Hash of refresh token -> user data
hashlib.sha256(b"demo_refresh_token_12345").hexdigest(): {
"user_id": "user123",
"username": "testuser",
"issued_at": time.time(),
}
}
def generate_access_token(self) -> str:
"""Generate a secure random access token."""
return secrets.token_urlsafe(32)
async def _process_token_refresh(self, data: dict[str, str]) -> web.Response:
"""Process the token refresh request."""
refresh_token = data.get("refresh_token")
if not refresh_token:
return web.json_response({"error": "refresh_token required"}, status=400)
# Hash the refresh token to look it up
refresh_token_hash = hashlib.sha256(refresh_token.encode()).hexdigest()
if refresh_token_hash not in self.refresh_tokens_db:
return web.json_response({"error": "Invalid refresh token"}, status=401)
user_data = self.refresh_tokens_db[refresh_token_hash]
# Generate new access token
access_token = self.generate_access_token()
expires_in = 300 # 5 minutes for demo
# Store the access token with expiry
token_hash = hashlib.sha256(access_token.encode()).hexdigest()
self.tokens_db[token_hash] = {
"user_id": user_data["user_id"],
"username": user_data["username"],
"expires_at": time.time() + expires_in,
"issued_at": time.time(),
}
# Clean up expired tokens periodically
current_time = time.time()
self.tokens_db = {
k: v
for k, v in self.tokens_db.items()
if isinstance(v["expires_at"], float) and v["expires_at"] > current_time
}
return web.json_response(
{
"access_token": access_token,
"token_type": "Bearer",
"expires_in": expires_in,
}
)
async def handle_token_refresh(self, request: web.Request) -> web.Response:
"""Handle token refresh requests."""
try:
data = await request.json()
return await self._process_token_refresh(data)
except json.JSONDecodeError:
return web.json_response({"error": "Invalid request"}, status=400)
async def verify_bearer_token(
self, request: web.Request
) -> dict[str, str | float] | None:
"""Verify bearer token and return user data if valid."""
auth_header = request.headers.get(hdrs.AUTHORIZATION, "")
if not auth_header.startswith("Bearer "):
return None
token = auth_header[7:] # Remove "Bearer "
token_hash = hashlib.sha256(token.encode()).hexdigest()
# Check if token exists and is not expired
if token_hash in self.tokens_db:
token_data = self.tokens_db[token_hash]
if (
isinstance(token_data["expires_at"], float)
and token_data["expires_at"] > time.time()
):
return token_data
return None
async def handle_protected_resource(self, request: web.Request) -> web.Response:
"""Protected endpoint that requires valid bearer token."""
user_data = await self.verify_bearer_token(request)
if not user_data:
return web.json_response({"error": "Invalid or expired token"}, status=401)
return web.json_response(
{
"message": "Access granted to protected resource",
"user": user_data["username"],
"data": "Secret information",
}
)
async def handle_user_info(self, request: web.Request) -> web.Response:
"""Another protected endpoint."""
user_data = await self.verify_bearer_token(request)
if not user_data:
return web.json_response({"error": "Invalid or expired token"}, status=401)
return web.json_response(
{
"user_id": user_data["user_id"],
"username": user_data["username"],
"email": f"{user_data['username']}@example.com",
"roles": ["user", "admin"],
}
)
async def run_test_server() -> web.AppRunner:
"""Run a test server with JWT auth endpoints."""
test_server = TestServer()
app = web.Application()
app.router.add_post("/token/refresh", test_server.handle_token_refresh)
app.router.add_get("/api/protected", test_server.handle_protected_resource)
app.router.add_get("/api/user", test_server.handle_user_info)
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 8080)
await site.start()
return runner
async def run_tests() -> None:
"""Run all token refresh middleware tests."""
# Create token refresh middleware
# In a real app, this refresh token would be securely stored
token_middleware = TokenRefreshMiddleware(
token_endpoint="http://localhost:8080/token/refresh",
refresh_token="demo_refresh_token_12345",
)
async with ClientSession(middlewares=(token_middleware,)) as session:
print("=== Test 1: First request (will trigger token refresh) ===")
async with session.get("http://localhost:8080/api/protected") as resp:
if resp.status == 200:
data = await resp.json()
print(f"Success! Response: {data}")
else:
print(f"Failed with status: {resp.status}")
print("\n=== Test 2: Second request (uses cached token) ===")
async with session.get("http://localhost:8080/api/user") as resp:
if resp.status == 200:
data = await resp.json()
print(f"User info: {data}")
else:
print(f"Failed with status: {resp.status}")
print("\n=== Test 3: Multiple concurrent requests ===")
print("(Should only refresh token once)")
coros: list[Coroutine[Any, Any, ClientResponse]] = []
for i in range(3):
coro = session.get("http://localhost:8080/api/protected")
coros.append(coro)
responses = await asyncio.gather(*coros)
for i, resp in enumerate(responses):
async with resp:
if resp.status == 200:
print(f"Request {i + 1}: Success")
else:
print(f"Request {i + 1}: Failed with {resp.status}")
print("\n=== Test 4: Simulate token expiry ===")
# For demo purposes, force token expiry
token_middleware.token_expires_at = time.time() - 1
print("Token expired, next request should trigger refresh...")
async with session.get("http://localhost:8080/api/protected") as resp:
if resp.status == 200:
data = await resp.json()
print(f"Success after token refresh! Response: {data}")
else:
print(f"Failed with status: {resp.status}")
print("\n=== Test 5: Request without middleware (no auth) ===")
# Make a request without any middleware to show the difference
async with session.get(
"http://localhost:8080/api/protected",
middlewares=(), # Bypass all middleware for this request
) as resp:
print(f"Status: {resp.status}")
if resp.status == 401:
error = await resp.json()
print(f"Failed as expected without auth: {error}")
async def main() -> None:
# Start test server
server = await run_test_server()
try:
await run_tests()
finally:
await server.cleanup()
if __name__ == "__main__":
asyncio.run(main())
| TestServer |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 24835,
"end": 26547
} | class ____(CLIPPreTrainedModel):
config: CLIPTextConfig
input_modalities = ("text",)
_no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]
def __init__(self, config: CLIPTextConfig):
super().__init__(config)
self.text_model = CLIPTextTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, value):
self.text_model.embeddings.token_embedding = value
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
r"""
Examples:
```python
>>> from transformers import AutoTokenizer, CLIPTextModel
>>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
>>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
return self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
**kwargs,
)
| CLIPTextModel |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 19572,
"end": 20949
} | class ____(TestCase):
def setUp(self):
class AuthAccessingRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'txt'
def render(self, data, media_type=None, renderer_context=None):
request = renderer_context['request']
if request.user.is_authenticated:
return b'authenticated'
return b'not authenticated'
class FailingAuth(BaseAuthentication):
def authenticate(self, request):
raise exceptions.AuthenticationFailed('authentication failed')
class ExampleView(APIView):
authentication_classes = (FailingAuth,)
renderer_classes = (AuthAccessingRenderer,)
def get(self, request):
return Response({'foo': 'bar'})
self.view = ExampleView.as_view()
def test_failing_auth_accessed_in_renderer(self):
"""
When authentication fails the renderer should still be able to access
`request.user` without raising an exception. Particularly relevant
to HTML responses that might reasonably access `request.user`.
"""
request = factory.get('/')
response = self.view(request)
content = response.render().content
assert content == b'not authenticated'
| FailingAuthAccessedInRenderer |
python | pypa__pipenv | pipenv/patched/pip/_internal/commands/download.py | {
"start": 745,
"end": 5393
} | class ____(RequirementCommand):
"""
Download packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports downloading from "requirements files", which provide
an easy way to specify a whole environment to be downloaded.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.global_options())
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(cmdoptions.pre())
self.cmd_opts.add_option(cmdoptions.require_hashes())
self.cmd_opts.add_option(cmdoptions.progress_bar())
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.no_use_pep517())
self.cmd_opts.add_option(cmdoptions.check_build_deps())
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(
"-d",
"--dest",
"--destination-dir",
"--destination-directory",
dest="download_dir",
metavar="dir",
default=os.curdir,
help="Download packages into <dir>.",
)
cmdoptions.add_target_python_options(self.cmd_opts)
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
@with_cleanup
def run(self, options: Values, args: List[str]) -> int:
options.ignore_installed = True
# editable doesn't really make sense for `pip download`, but the bowels
# of the RequirementSet code require that property.
options.editables = []
cmdoptions.check_dist_restriction(options)
options.download_dir = normalize_path(options.download_dir)
ensure_dir(options.download_dir)
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
build_tracker = self.enter_context(get_build_tracker())
directory = TempDirectory(
delete=not options.no_clean,
kind="download",
globally_managed=True,
)
reqs = self.get_requirements(args, options, finder, session)
check_legacy_setup_py_options(options, reqs)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
build_tracker=build_tracker,
session=session,
finder=finder,
download_dir=options.download_dir,
use_user_site=False,
verbosity=self.verbosity,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
ignore_requires_python=options.ignore_requires_python,
use_pep517=options.use_pep517,
py_version_info=options.python_version,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
downloaded: List[str] = []
for req in requirement_set.requirements.values():
if req.satisfied_by is None:
assert req.name is not None
preparer.save_linked_requirement(req)
downloaded.append(req.name)
preparer.prepare_linked_requirements_more(requirement_set.requirements.values())
if downloaded:
write_output("Successfully downloaded %s", " ".join(downloaded))
return SUCCESS
| DownloadCommand |
python | pypa__warehouse | tests/common/db/accounts.py | {
"start": 2482,
"end": 2858
} | class ____(WarehouseFactory):
class Meta:
model = UserTermsOfServiceEngagement
revision = "initial"
engagement = TermsOfServiceEngagement.Agreed
created = factory.Faker(
"date_time_between_dates",
datetime_start=datetime.datetime(2025, 1, 1),
datetime_end=datetime.datetime(2025, 2, 19),
)
| UserTermsOfServiceEngagementFactory |
python | joke2k__faker | faker/providers/person/de_LI/__init__.py | {
"start": 81,
"end": 17973
} | class ____(PersonProvider):
# Top 50 surnames in Liechtenstein
# Weighted by number of occurrences
# Source: https://de.wikipedia.org/wiki/Familiennamen_in_Liechtenstein#Die_h%C3%A4ufigsten_50_Familiennamen
# on 2024-10-31
last_names = OrderedDict(
(
("Banzer", 0.011916111),
("Bargetze", 0.007864633),
("Batliner", 0.011201144),
("Beck", 0.05926279),
("Biedermann", 0.016682555),
("Büchel", 0.063711471),
("Bühler", 0.01509374),
("Eberle", 0.023196695),
("Foser", 0.008420718),
("Frick", 0.053781379),
("Frommelt", 0.021607881),
("Gassner", 0.028519225),
("Gstöhl", 0.020734032),
("Hasler", 0.035668891),
("Heeb", 0.011201144),
("Hilti", 0.014458214),
("Hoop", 0.012789959),
("Jehle", 0.010486177),
("Kaiser", 0.018509692),
("Kaufmann", 0.014855418),
("Kieber", 0.010009533),
("Kind", 0.010486177),
("Kindle", 0.025977121),
("Konrad", 0.007626311),
("Kranz", 0.015967588),
("Lampert", 0.017318081),
("Marxer", 0.05608516),
("Matt", 0.017635844),
("Meier", 0.031776295),
("Negele", 0.01080394),
("Nigg", 0.015570384),
("Nipp", 0.009453448),
("Nägele", 0.008102955),
("Näscher", 0.011042262),
("Oehri", 0.014617096),
("Ospelt", 0.026612647),
("Risch", 0.016603114),
("Ritter", 0.023911662),
("Schädler", 0.04313632),
("Sele", 0.016920877),
("Sprenger", 0.010962822),
("Thöny", 0.007626311),
("Vogt", 0.047982205),
("Wachter", 0.010406737),
("Walser", 0.016682555),
("Wanger", 0.008976803),
("Wille", 0.008182396),
("Wohlwend", 0.022402288),
("Wolfinger", 0.010247855),
("Öhri", 0.01406101),
)
)
# Source:
# https://www.baby-vornamen.de/Namensthemen/Charts/Beliebteste-Vornamen-Liechtenstein-182.php#Jahrescharts-Liechtenstein
# Took the 30 most common baby names per year (1996 to 2022) and weighted them by number of
# occurences (how often the name appeared in one of the year lists)
first_names_male = OrderedDict(
(
("Aaron", 0.00817),
("Adrian", 0.00817),
("Ajan", 0.00117),
("Alessandro", 0.00233),
("Alessio", 0.00467),
("Alexander", 0.01517),
("Amar", 0.00233),
("Andreas", 0.0035),
("Andrin", 0.00583),
("Aras", 0.00117),
("Bastian", 0.00117),
("Ben", 0.00933),
("Benedikt", 0.00117),
("Benjamin", 0.01167),
("Brian", 0.00117),
("Christoph", 0.00233),
("Colin", 0.00117),
("Conradin", 0.00117),
("Constantin", 0.0035),
("Cristiano", 0.00117),
("Damian", 0.00233),
("Daniel", 0.00817),
("Dario", 0.007),
("Daris", 0.00117),
("David", 0.014),
("Davide", 0.00117),
("Diego", 0.00233),
("Diogo", 0.00117),
("Dominic", 0.00117),
("Dominik", 0.007),
("Dylan", 0.0035),
("Eldon", 0.00117),
("Elia", 0.00583),
("Elias", 0.01984),
("Elijah", 0.00117),
("Elio", 0.00117),
("Eloi", 0.00117),
("Emanuel", 0.00467),
("Emil", 0.0035),
("Emilian", 0.00233),
("Emmanuel", 0.00117),
("Enea", 0.00233),
("Eren", 0.00117),
("Eric", 0.00117),
("Fabian", 0.014),
("Fabio", 0.01517),
("Fabrizio", 0.00117),
("Felix", 0.00817),
("Finn", 0.00583),
("Florian", 0.00583),
("Florin", 0.00117),
("Gabriel", 0.01284),
("Gian", 0.0035),
("Gian-Luca", 0.00117),
("Gion", 0.00117),
("Goncalo", 0.00117),
("Gustav", 0.00117),
("Hans", 0.00117),
("Henri", 0.00117),
("Henry", 0.00233),
("Ian", 0.00117),
("Jakob", 0.00233),
("James", 0.00117),
("Jan", 0.007),
("Janick", 0.00117),
("Janik", 0.00117),
("Janis", 0.00117),
("Jano", 0.00117),
("Joel", 0.01167),
("Johannes", 0.00933),
("Jonas", 0.021),
("Jonathan", 0.00233),
("Josef", 0.00233),
("Joshua", 0.00117),
("Julian", 0.0245),
("Julius", 0.00233),
("Justin", 0.00467),
("Kevin", 0.00583),
("Kian", 0.00117),
("Kiano", 0.00117),
("Kilian", 0.00233),
("Konstantin", 0.00233),
("Lars", 0.00233),
("Laurenz", 0.00117),
("Laurin", 0.00933),
("Lean", 0.00117),
("Leandro", 0.00817),
("Leano", 0.0035),
("Lenny", 0.00233),
("Leo", 0.0105),
("Leon", 0.01634),
("Leonardo", 0.00117),
("Leonhard", 0.00117),
("Leopold", 0.00233),
("Levi", 0.00117),
("Levin", 0.0035),
("Liam", 0.00467),
("Lian", 0.00467),
("Linus", 0.00233),
("Lio", 0.00233),
("Lionel", 0.00583),
("Lirjan", 0.00117),
("Livio", 0.0035),
("Lorenz", 0.00117),
("Loris", 0.00233),
("Louie", 0.00233),
("Louis", 0.0105),
("Luan", 0.00233),
("Luca", 0.0175),
("Lucas", 0.00467),
("Luej", 0.00117),
("Luigi", 0.00117),
("Luis", 0.01517),
("Lukas", 0.0175),
("Mael", 0.00117),
("Malik", 0.0035),
("Malio", 0.00117),
("Mantas", 0.00117),
("Manuel", 0.014),
("Marc", 0.007),
("Marcel", 0.00233),
("Marco", 0.0105),
("Marino", 0.00117),
("Mario", 0.00117),
("Marlon", 0.0035),
("Martim", 0.00117),
("Martin", 0.0035),
("Marvin", 0.00117),
("Mathias", 0.00117),
("Mats", 0.00117),
("Matteo", 0.01167),
("Matthias", 0.007),
("Matti", 0.00117),
("Mattia", 0.00233),
("Maurice", 0.00117),
("Mauro", 0.0035),
("Max", 0.00817),
("Maxim", 0.00117),
("Maximilian", 0.01634),
("Metehan", 0.00117),
("Michael", 0.01167),
("Michele", 0.00233),
("Mike", 0.00117),
("Mikyas", 0.00117),
("Milan", 0.00117),
("Milo", 0.00117),
("Moritz", 0.00233),
("Muhamed", 0.00233),
("Muhammed", 0.00467),
("Nael", 0.00233),
("Nando", 0.00117),
("Natanael", 0.00117),
("Nelio", 0.00117),
("Nevio", 0.00233),
("Niclas", 0.00233),
("Nico", 0.01284),
("Nicola", 0.00117),
("Nicolas", 0.00933),
("Niels", 0.00117),
("Niklas", 0.007),
("Nils", 0.00233),
("Nino", 0.00467),
("Noah", 0.0245),
("Noam", 0.00117),
("Noe", 0.00117),
("Noel", 0.007),
("Oliver", 0.00233),
("Orlando", 0.00117),
("Oscar", 0.00117),
("Oskar", 0.00233),
("Pascal", 0.01167),
("Patrick", 0.007),
("Patrik", 0.00117),
("Paul", 0.00933),
("Philipp", 0.007),
("Rafael", 0.00583),
("Raffael", 0.00233),
("Ramon", 0.0035),
("Raphael", 0.01984),
("Rino", 0.00117),
("Robin", 0.0105),
("Rodrigo", 0.00233),
("Romeo", 0.00117),
("Ruben", 0.00583),
("Ryan", 0.00233),
("Samir", 0.00117),
("Samuel", 0.01867),
("Sandro", 0.007),
("Santiago", 0.00233),
("Sebastian", 0.0105),
("Severin", 0.00117),
("Silas", 0.00117),
("Silvio", 0.00117),
("Simon", 0.0175),
("Stefan", 0.00117),
("Tenzin", 0.00233),
("Theo", 0.00233),
("Theodor", 0.00233),
("Thiago", 0.00117),
("Thomas", 0.00117),
("Tiago", 0.00233),
("Till", 0.00117),
("Tim", 0.00467),
("Timo", 0.00233),
("Timon", 0.00117),
("Timur", 0.00117),
("Tiziano", 0.00117),
("Tobias", 0.01167),
("Valentin", 0.00933),
("Vince", 0.00117),
("Vincent", 0.00233),
("Wenzel", 0.00117),
("Yanis", 0.00117),
("Yannick", 0.0035),
("Yassin", 0.00117),
("Yoan", 0.00117),
("Ömer", 0.00117),
)
)
first_names_female = OrderedDict(
(
("Adriana", 0.00361),
("Afra", 0.0012),
("Alea", 0.0012),
("Alessia", 0.01566),
("Alexandra", 0.0012),
("Alicia", 0.0012),
("Alina", 0.01205),
("Alisa", 0.0012),
("Alya", 0.0012),
("Amaya", 0.0012),
("Amelia", 0.0012),
("Amelie", 0.01446),
("Amy", 0.00361),
("Anastasia", 0.0012),
("Angelina", 0.00241),
("Anika", 0.00241),
("Anisa", 0.0012),
("Anja", 0.0012),
("Anna", 0.02651),
("Anna-Lena", 0.0012),
("Annalena", 0.0012),
("Annika", 0.00241),
("Annina", 0.0012),
("Anouk", 0.0012),
("Aria", 0.0012),
("Ariana", 0.00241),
("Aurora", 0.00361),
("Ayse", 0.0012),
("Bianca", 0.0012),
("Carla", 0.00361),
("Carmen", 0.0012),
("Carolina", 0.0012),
("Caroline", 0.0012),
("Cataleya", 0.0012),
("Celina", 0.0012),
("Celine", 0.00482),
("Chiara", 0.01928),
("Christina", 0.0012),
("Claudia", 0.0012),
("Cosima", 0.0012),
("Daria", 0.0012),
("Deborah", 0.0012),
("Deniis", 0.0012),
("Diana", 0.00361),
("Dilara", 0.0012),
("Eileen", 0.0012),
("Ela", 0.00361),
("Elea", 0.00241),
("Elena", 0.01687),
("Elfida", 0.0012),
("Eliana", 0.00241),
("Eliane", 0.0012),
("Elif", 0.00241),
("Elin", 0.00482),
("Elina", 0.00361),
("Eliona", 0.0012),
("Elisa", 0.00361),
("Elisabeth", 0.0012),
("Ella", 0.00482),
("Elvana", 0.0012),
("Emelina", 0.0012),
("Emilia", 0.01566),
("Emilie", 0.0012),
("Emily", 0.00482),
("Emine", 0.0012),
("Emma", 0.01928),
("Enna", 0.0012),
("Enya", 0.0012),
("Eowyn", 0.0012),
("Erva", 0.0012),
("Eslemnur", 0.0012),
("Estella", 0.0012),
("Eva", 0.00482),
("Eva-Maria", 0.0012),
("Evita", 0.0012),
("Fabienne", 0.00602),
("Felicia", 0.0012),
("Filippa", 0.00241),
("Fiona", 0.00843),
("Fjolla", 0.0012),
("Florina", 0.0012),
("Franziska", 0.00241),
("Frida", 0.0012),
("Frieda", 0.0012),
("Gaia", 0.0012),
("Geraldine", 0.0012),
("Gina", 0.00241),
("Gioia", 0.0012),
("Giulia", 0.00482),
("Gizem", 0.0012),
("Grace", 0.0012),
("Gwenda", 0.0012),
("Hana", 0.0012),
("Hanna", 0.00241),
("Hannah", 0.00964),
("Helena", 0.00482),
("Ilenia", 0.0012),
("Irina", 0.0012),
("Isabel", 0.00241),
("Isabella", 0.00241),
("Jacqueline", 0.00241),
("Jana", 0.00964),
("Janina", 0.00241),
("Janine", 0.00361),
("Jasmin", 0.0012),
("Jennifer", 0.00482),
("Jenny", 0.0012),
("Jessica", 0.00964),
("Joana", 0.00361),
("Joanna", 0.0012),
("Johanna", 0.00964),
("Jolina", 0.0012),
("Jule", 0.0012),
("Julia", 0.02048),
("Katharina", 0.01084),
("Kerstin", 0.0012),
("Klara", 0.0012),
("Klea", 0.0012),
("Künkyi", 0.0012),
("Ladina", 0.01084),
("Lara", 0.02048),
("Larissa", 0.00964),
("Laura", 0.02289),
("Laurina", 0.0012),
("Lavinia", 0.0012),
("Lea", 0.01687),
("Leana", 0.0012),
("Lena", 0.01807),
("Leni", 0.00241),
("Leonie", 0.02048),
("Letizia", 0.00241),
("Leyla", 0.0012),
("Leyla-Katharina", 0.0012),
("Lhanzey", 0.0012),
("Lia", 0.00602),
("Lilia", 0.0012),
("Liliana", 0.0012),
("Lillian", 0.0012),
("Lilly", 0.0012),
("Lily", 0.0012),
("Lina", 0.01325),
("Linda", 0.00361),
("Lisa", 0.01928),
("Liv", 0.00241),
("Livia", 0.00602),
("Liya", 0.0012),
("Lola", 0.0012),
("Lorena", 0.00843),
("Louana", 0.0012),
("Louisa", 0.0012),
("Louise", 0.0012),
("Luana", 0.00241),
("Luena", 0.0012),
("Luisa", 0.01084),
("Luna", 0.0012),
("Lydia", 0.00241),
("Lynn", 0.00482),
("Madeleine", 0.0012),
("Madleina", 0.00241),
("Magdalena", 0.00361),
("Maila", 0.0012),
("Maisa", 0.0012),
("Maivi", 0.0012),
("Maja", 0.0012),
("Malea", 0.00482),
("Malene", 0.0012),
("Malu", 0.0012),
("Manila", 0.0012),
("Mara", 0.00602),
("Mara-Julie", 0.0012),
("Maren", 0.0012),
("Margarita", 0.0012),
("Mari", 0.0012),
("Maria", 0.01084),
("Marie", 0.00602),
("Marie-Cecilie", 0.0012),
("Mariella", 0.0012),
("Marina", 0.00241),
("Martina", 0.0012),
("Mathilda", 0.0012),
("Matilda", 0.00361),
("Mavie", 0.0012),
("Maxima", 0.0012),
("Maya", 0.0012),
("Melanie", 0.00843),
("Melanine", 0.0012),
("Melina", 0.00482),
("Melissa", 0.00723),
("Merve", 0.0012),
("Mia", 0.01446),
("Michele", 0.00241),
("Michelle", 0.00482),
("Mila", 0.00241),
("Milena", 0.00482),
("Mina", 0.00361),
("Mira", 0.0012),
("Muriel", 0.0012),
("Nadine", 0.0012),
("Nahla", 0.0012),
("Naomi", 0.00482),
("Natalie", 0.0012),
("Nathalie", 0.0012),
("Nathasha", 0.0012),
("Nelia", 0.0012),
("Nelya", 0.0012),
("Neslisah", 0.0012),
("Nicole", 0.00482),
("Nina", 0.01928),
("Noelia", 0.00482),
("Noemi", 0.00964),
("Nora", 0.00482),
("Nour", 0.0012),
("Olivia", 0.00241),
("Patricia", 0.0012),
("Paula", 0.00723),
("Paulina", 0.0012),
("Pia", 0.00241),
("Rahel", 0.00241),
("Ramona", 0.00361),
("Raphaela", 0.0012),
("Rebecca", 0.00602),
("Robin", 0.0012),
("Romy", 0.0012),
("Ronja", 0.00241),
("Sabrina", 0.00482),
("Sally", 0.0012),
("Salome", 0.00241),
("Samantha", 0.0012),
("Saphira", 0.00241),
("Sara", 0.00723),
("Sarah", 0.01446),
("Sarina", 0.00241),
("Selina", 0.00843),
("Sina", 0.01084),
("Sofia", 0.00361),
("Sophia", 0.02048),
("Sophie", 0.00602),
("Soraya", 0.0012),
("Stefanie", 0.00241),
("Svenja", 0.0012),
("Tamara", 0.0012),
("Tatjana", 0.0012),
("Tenzin", 0.0012),
("Teresa", 0.0012),
("Thalia", 0.0012),
("Tina", 0.0012),
("Valentina", 0.01325),
("Valeria", 0.00241),
("Vanessa", 0.01325),
("Victoria", 0.00482),
("Viktoria", 0.0012),
("Xenia", 0.0012),
("Yara", 0.0012),
("Ylvi", 0.0012),
("Zehra", 0.00241),
("Zejna", 0.0012),
("Zoe", 0.00361),
)
)
| Provider |
python | celery__celery | celery/bin/base.py | {
"start": 883,
"end": 4079
} | class ____:
"""Context Object for the CLI."""
def __init__(self, app, no_color, workdir, quiet=False):
"""Initialize the CLI context."""
self.app = app or get_current_app()
self.no_color = no_color
self.quiet = quiet
self.workdir = workdir
@cached_property
def OK(self):
return self.style("OK", fg="green", bold=True)
@cached_property
def ERROR(self):
return self.style("ERROR", fg="red", bold=True)
def style(self, message=None, **kwargs):
if self.no_color:
return message
else:
return click.style(message, **kwargs)
def secho(self, message=None, **kwargs):
if self.no_color:
kwargs['color'] = False
click.echo(message, **kwargs)
else:
click.secho(message, **kwargs)
def echo(self, message=None, **kwargs):
if self.no_color:
kwargs['color'] = False
click.echo(message, **kwargs)
else:
click.echo(message, **kwargs)
def error(self, message=None, **kwargs):
kwargs['err'] = True
if self.no_color:
kwargs['color'] = False
click.echo(message, **kwargs)
else:
click.secho(message, **kwargs)
def pretty(self, n):
if isinstance(n, list):
return self.OK, self.pretty_list(n)
if isinstance(n, dict):
if 'ok' in n or 'error' in n:
return self.pretty_dict_ok_error(n)
else:
s = json.dumps(n, sort_keys=True, indent=4)
if not self.no_color:
s = highlight(s, LEXER, FORMATTER)
return self.OK, s
if isinstance(n, str):
return self.OK, n
return self.OK, pformat(n)
def pretty_list(self, n):
if not n:
return '- empty -'
return '\n'.join(
f'{self.style("*", fg="white")} {item}' for item in n
)
def pretty_dict_ok_error(self, n):
try:
return (self.OK,
text.indent(self.pretty(n['ok'])[1], 4))
except KeyError:
pass
return (self.ERROR,
text.indent(self.pretty(n['error'])[1], 4))
def say_chat(self, direction, title, body='', show_body=False):
if direction == '<-' and self.quiet:
return
dirstr = not self.quiet and f'{self.style(direction, fg="white", bold=True)} ' or ''
self.echo(f'{dirstr} {title}')
if body and show_body:
self.echo(body)
def handle_preload_options(f):
"""Extract preload options and return a wrapped callable."""
def caller(ctx, *args, **kwargs):
app = ctx.obj.app
preload_options = [o.name for o in app.user_options.get('preload', [])]
if preload_options:
user_options = {
preload_option: kwargs[preload_option]
for preload_option in preload_options
}
user_preload_options.send(sender=f, app=app, options=user_options)
return f(ctx, *args, **kwargs)
return update_wrapper(caller, f)
| CLIContext |
python | numba__numba | numba/tests/test_gil.py | {
"start": 1788,
"end": 5966
} | class ____(TestCase):
def make_test_array(self, n_members):
return np.arange(n_members, dtype=np.int64)
def run_in_threads(self, func, n_threads):
# Run the function in parallel over an array and collect results.
threads = []
# Warm up compilation, since we don't want that to interfere with
# the test proper.
func(self.make_test_array(1), np.arange(1, dtype=np.intp))
arr = self.make_test_array(50)
for i in range(n_threads):
# Ensure different threads write into the array in different
# orders.
indices = np.arange(arr.size, dtype=np.intp)
np.random.shuffle(indices)
t = threading.Thread(target=func, args=(arr, indices))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
return arr
def check_gil_held(self, func):
arr = self.run_in_threads(func, n_threads=4)
distinct = set(arr)
self.assertEqual(len(distinct), 1, distinct)
def check_gil_released(self, func):
for n_threads in (4, 12, 32):
# Try harder each time. On an empty machine 4 threads seems
# sufficient, but in some contexts (e.g. Travis CI) we need more.
arr = self.run_in_threads(func, n_threads)
distinct = set(arr)
try:
self.assertGreater(len(distinct), 1, distinct)
except AssertionError as e:
failure = e
else:
return
raise failure
@skip_if_freethreading
def test_gil_held(self):
"""
Test the GIL is held by default, by checking serialized runs
produce deterministic results.
"""
cfunc = jit(f_sig, nopython=True)(f)
self.check_gil_held(cfunc)
def test_gil_released(self):
"""
Test releasing the GIL, by checking parallel runs produce
unpredictable results.
"""
cfunc = jit(f_sig, nopython=True, nogil=True)(f)
self.check_gil_released(cfunc)
def test_gil_released_inside_lifted_loop(self):
"""
Test the GIL can by released by a lifted loop even though the
surrounding code uses object mode.
"""
cfunc = jit(f_sig, forceobj=True, nogil=True)(lifted_f)
self.check_gil_released(cfunc)
def test_gil_released_by_caller(self):
"""
Releasing the GIL in the caller is sufficient to have it
released in a callee.
"""
compiled_f = jit(f_sig, nopython=True)(f)
@jit(f_sig, nopython=True, nogil=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_released(caller)
def test_gil_released_by_caller_and_callee(self):
"""
Same, but with both caller and callee asking to release the GIL.
"""
compiled_f = jit(f_sig, nopython=True, nogil=True)(f)
@jit(f_sig, nopython=True, nogil=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_released(caller)
@skip_if_freethreading
def test_gil_ignored_by_callee(self):
"""
When only the callee asks to release the GIL, it gets ignored.
"""
compiled_f = jit(f_sig, nopython=True, nogil=True)(f)
@jit(f_sig, nopython=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_held(caller)
def test_object_mode(self):
"""
When the function is compiled in object mode, a warning is
printed out.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always', errors.NumbaWarning)
cfunc = jit(f_sig, forceobj=True, nogil=True)(object_f)
self.assertTrue(any(w.category is errors.NumbaWarning
and "Code running in object mode won't allow parallel execution" in str(w.message)
for w in wlist), wlist)
# Just check it doesn't crash.
self.run_in_threads(cfunc, 2)
if __name__ == '__main__':
unittest.main()
| TestGILRelease |
python | scrapy__scrapy | tests/test_scheduler_base.py | {
"start": 1173,
"end": 1463
} | class ____(MinimalScheduler):
def open(self, spider: Spider) -> defer.Deferred:
return defer.succeed("open")
def close(self, reason: str) -> defer.Deferred:
return defer.succeed("close")
def __len__(self) -> int:
return len(self.requests)
| SimpleScheduler |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 7496,
"end": 8272
} | class ____(GISLookup):
lookup_name = "relate"
sql_template = "%(func)s(%(lhs)s, %(rhs)s, %%s)"
pattern_regex = _lazy_re_compile(r"^[012TF*]{9}$")
def process_rhs(self, compiler, connection):
# Check the pattern argument
pattern = self.rhs_params[0]
backend_op = connection.ops.gis_operators[self.lookup_name]
if hasattr(backend_op, "check_relate_argument"):
backend_op.check_relate_argument(pattern)
elif not isinstance(pattern, str) or not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
sql, params = super().process_rhs(compiler, connection)
return sql, (*params, pattern)
@BaseSpatialField.register_lookup
| RelateLookup |
python | pytorch__pytorch | torch/_higher_order_ops/_invoke_quant.py | {
"start": 629,
"end": 826
} | class ____(BaseHOP):
def __init__(self) -> None:
super().__init__("invoke_quant")
invoke_quant = InvokeQuantUnpacked()
@dataclasses.dataclass(frozen=True, repr=True)
| InvokeQuantUnpacked |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0102_cleanup_failed_safe_deletes.py | {
"start": 207,
"end": 1717
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("workflow_engine", "0101_remove_is_single_written_field"),
]
operations = [
# Clean up table that may not have been deleted due to missing
# historical_silo_assignments entry before the fix
SafeRunSQL(
sql="DROP TABLE IF EXISTS workflow_engine_actiongroupstatus CASCADE;",
reverse_sql=migrations.RunSQL.noop,
hints={"tables": ["workflow_engine_actiongroupstatus"]},
),
]
| Migration |
python | ray-project__ray | doc/source/serve/doc_code/custom_request_router_app.py | {
"start": 534,
"end": 1520
} | class ____:
def __init__(self):
context = _get_internal_replica_context()
self.replica_id: ReplicaID = context.replica_id
async def __call__(self):
return self.replica_id
handle = serve.run(UniformRequestRouterApp.bind())
response = handle.remote().result()
print(f"Response from UniformRequestRouterApp: {response}")
# Example output:
# Response from UniformRequestRouterApp:
# Replica(id='67vc4ts5', deployment='UniformRequestRouterApp', app='default')
# __end_deploy_app_with_uniform_request_router__
# __begin_deploy_app_with_throughput_aware_request_router__
def _time_ms() -> int:
return int(time.time() * 1000)
@serve.deployment(
request_router_config=RequestRouterConfig(
request_router_class="custom_request_router:ThroughputAwareRequestRouter",
request_routing_stats_period_s=1,
request_routing_stats_timeout_s=1,
),
num_replicas=3,
ray_actor_options={"num_cpus": 0},
)
| UniformRequestRouterApp |
python | huggingface__transformers | src/transformers/models/doge/modeling_doge.py | {
"start": 23363,
"end": 24485
} | class ____(PreTrainedModel):
config: DogeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["DogeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = False
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(DogeCDMoE, index=1),
"hidden_states": DogeDecoderLayer,
"attentions": DogeAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, DogeAttention):
if hasattr(module, "A"):
init.zeros_(module.A)
elif isinstance(module, DogeDecoderLayer):
if hasattr(module, "input_residual"):
init.ones_(module.input_residual)
if hasattr(module, "post_attention_residual"):
init.ones_(module.post_attention_residual)
@auto_docstring
| DogePreTrainedModel |
python | plotly__plotly.py | plotly/graph_objs/isosurface/_surface.py | {
"start": 233,
"end": 6708
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface"
_path_str = "isosurface.surface"
_valid_props = {"count", "fill", "pattern", "show"}
@property
def count(self):
"""
Sets the number of iso-surfaces between minimum and maximum
iso-values. By default this value is 2 meaning that only
minimum and maximum surfaces would be drawn.
The 'count' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["count"]
@count.setter
def count(self, val):
self["count"] = val
@property
def fill(self):
"""
Sets the fill ratio of the iso-surface. The default fill value
of the surface is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def pattern(self):
"""
Sets the surface pattern of the iso-surface 3-D sections. The
default pattern of the surface is `all` meaning that the rest
of surface elements would be shaded. The check options (either
1 or 2) could be used to draw half of the squares on the
surface. Using various combinations of capital `A`, `B`, `C`,
`D` and `E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of interest.
The 'pattern' property is a flaglist and may be specified
as a string containing:
- Any combination of ['A', 'B', 'C', 'D', 'E'] joined with '+' characters
(e.g. 'A+B')
OR exactly one of ['all', 'odd', 'even'] (e.g. 'even')
Returns
-------
Any
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def show(self):
"""
Hides/displays surfaces between minimum and maximum iso-values.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
"""
def __init__(
self, arg=None, count=None, fill=None, pattern=None, show=None, **kwargs
):
"""
Construct a new Surface object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.Surface`
count
Sets the number of iso-surfaces between minimum and
maximum iso-values. By default this value is 2 meaning
that only minimum and maximum surfaces would be drawn.
fill
Sets the fill ratio of the iso-surface. The default
fill value of the surface is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
pattern
Sets the surface pattern of the iso-surface 3-D
sections. The default pattern of the surface is `all`
meaning that the rest of surface elements would be
shaded. The check options (either 1 or 2) could be used
to draw half of the squares on the surface. Using
various combinations of capital `A`, `B`, `C`, `D` and
`E` may also be used to reduce the number of triangles
on the iso-surfaces and creating other patterns of
interest.
show
Hides/displays surfaces between minimum and maximum
iso-values.
Returns
-------
Surface
"""
super().__init__("surface")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.Surface
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Surface`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("count", arg, count)
self._set_property("fill", arg, fill)
self._set_property("pattern", arg, pattern)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Surface |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 893,
"end": 987
} | class ____(Exception):
"""Represents the base type for rendezvous errors."""
| RendezvousError |
python | doocs__leetcode | solution/0600-0699/0674.Longest Continuous Increasing Subsequence/Solution2.py | {
"start": 0,
"end": 304
} | class ____:
def findLengthOfLCIS(self, nums: List[int]) -> int:
ans, n = 1, len(nums)
i = 0
while i < n:
j = i + 1
while j < n and nums[j - 1] < nums[j]:
j += 1
ans = max(ans, j - i)
i = j
return ans
| Solution |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 6713,
"end": 6966
} | class ____:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
self.list = _legacy_response.async_to_raw_response_wrapper(
checkpoints.list,
)
| AsyncCheckpointsWithRawResponse |
python | ApeWorX__ape | src/ape/plugins/project.py | {
"start": 198,
"end": 852
} | class ____(PluginType):
"""
A plugin for converting files to a ``PackageManifest``.
The default project plugin is the :class:`~ape.api.projects.ApeProject`.
Otherwise, you can define your own project implementation for converting
a set of files to a ``PackageManifest``, such as one that resolves dependencies
via ``.gitmodules``.
"""
@hookspec
def projects(self) -> Iterator[type["ProjectAPI"]]: # type: ignore[empty-body]
"""
A hook that returns a :class:`~ape.api.projects.ProjectAPI` subclass type.
Returns:
type[:class:`~ape.api.projects.ProjectAPI`]
"""
| ProjectPlugin |
python | doocs__leetcode | solution/0200-0299/0265.Paint House II/Solution.py | {
"start": 0,
"end": 350
} | class ____:
def minCostII(self, costs: List[List[int]]) -> int:
n, k = len(costs), len(costs[0])
f = costs[0][:]
for i in range(1, n):
g = costs[i][:]
for j in range(k):
t = min(f[h] for h in range(k) if h != j)
g[j] += t
f = g
return min(f)
| Solution |
python | mlflow__mlflow | mlflow/genai/scorers/base.py | {
"start": 904,
"end": 1214
} | class ____(Enum):
CLASS = "class"
BUILTIN = "builtin"
DECORATOR = "decorator"
INSTRUCTIONS = "instructions"
GUIDELINES = "guidelines"
_ALLOWED_SCORERS_FOR_REGISTRATION = [
ScorerKind.BUILTIN,
ScorerKind.DECORATOR,
ScorerKind.INSTRUCTIONS,
ScorerKind.GUIDELINES,
]
| ScorerKind |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 42937,
"end": 58790
} | class ____(BaseLookupTableTest):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringStaticVocabularyTable(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_1.txt")
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
oov_buckets,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testStaticVocabularyTableGetItem(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_1.txt")
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
oov_buckets,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table[input_string]
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testInt32StaticVocabularyTable(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
oov_buckets,
lookup_key_dtype=dtypes.int32,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testInt64StaticVocabularyTable(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
oov_buckets,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))
def testStringStaticVocabularyTableNoInitializer(self, is_anonymous):
oov_buckets = 5
# Set a table that only uses hash buckets, for each input value returns
# an id calculated by fingerprint("input") mod oov_buckets.
table = self.getVocabularyTable()(
None, oov_buckets, experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
self.evaluate(out))
self.assertEqual(oov_buckets, self.evaluate(table.size()))
def testStaticVocabularyTableWithMultipleInitializers(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_4.txt")
vocab_size = 3
oov_buckets = 3
init = lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size)
table1 = self.getVocabularyTable()(
init,
oov_buckets,
name="table1",
experimental_is_anonymous=is_anonymous)
table2 = self.getVocabularyTable()(
init,
oov_buckets,
name="table2",
experimental_is_anonymous=is_anonymous)
self.evaluate(lookup_ops.tables_initializer())
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = self.evaluate([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 5], out2)
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))
def testStaticVocabularyTableInitializationAcrossSessions(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_5.txt")
with self.cached_session():
vocab_size = 3
oov_buckets = 1
table1 = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
oov_buckets,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table1)
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))
with self.cached_session():
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to initialize table2
table2 = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
oov_buckets,
experimental_is_anonymous=is_anonymous)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], self.evaluate(out2))
self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))
def testStaticVocabularyTableAssetTracking(self, is_anonymous):
vocab_file = self._createVocabFile("vocab.txt")
vocab_size = 3
oov_buckets = 1
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
oov_buckets,
experimental_is_anonymous=is_anonymous)
objects = checkpoint_util.list_objects(graph_view.ObjectGraphView(table))
assets = list(filter(lambda obj: isinstance(obj, asset.Asset), objects))
self.assertLen(assets, 1)
self.assertEqual(
self.evaluate(assets[0].asset_path), compat.as_bytes(vocab_file))
def testSparseTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
1,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testRaggedTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_row_splits = [0, 2, 4, 5]
ragged_features = ragged_tensor.RaggedTensor.from_row_splits(
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_row_splits, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
1,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
ragged_ids = table.lookup(ragged_features)
self.assertAllEqual([5], ragged_ids.values._shape_as_list())
ragged_ids_val, ragged_ids_row_splits = self.evaluate(
[ragged_ids.values, ragged_ids.row_splits])
self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)
self.assertAllEqual(input_row_splits, ragged_ids_row_splits)
def testInt32SparseTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64),
1,
lookup_key_dtype=dtypes.int32,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32RaggedTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
input_row_splits = [0, 2, 4, 5]
ragged_features = ragged_tensor.RaggedTensor.from_row_splits(
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_row_splits, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64),
1,
lookup_key_dtype=dtypes.int32,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
ragged_ids = table.lookup(ragged_features)
self.assertAllEqual([5], ragged_ids.values._shape_as_list())
ragged_ids_val, ragged_ids_row_splits = self.evaluate(
[ragged_ids.values, ragged_ids.row_splits])
self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)
self.assertAllEqual(input_row_splits, ragged_ids_row_splits)
def testInt64SparseTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64),
1,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64RaggedTensor(self, is_anonymous):
if is_anonymous and not tf2.enabled():
self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)
input_row_splits = [0, 2, 4, 5]
ragged_features = ragged_tensor.RaggedTensor.from_row_splits(
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_row_splits, dtypes.int64))
table = self.getVocabularyTable()(
lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
dtypes.int64, dtypes.int64),
1,
experimental_is_anonymous=is_anonymous)
self.initialize_table(table)
ragged_ids = table.lookup(ragged_features)
self.assertAllEqual([5], ragged_ids.values._shape_as_list())
ragged_ids_val, ragged_ids_row_splits = self.evaluate(
[ragged_ids.values, ragged_ids.row_splits])
self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)
self.assertAllEqual(input_row_splits, ragged_ids_row_splits)
def testStaticVocabularyTableNoInnerTable(self, is_anonymous):
table = self.getVocabularyTable()(
None, num_oov_buckets=1, experimental_is_anonymous=is_anonymous)
self.assertIsNone(table.resource_handle)
@test_util.run_v2_only
def testSavedModelSaveRestore(self, is_anonymous):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
root = autotrackable.AutoTrackable()
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("11", "12", "13"))
vocab_size = 3
oov_buckets = 1
root.table = self.getVocabularyTable()(
lookup_ops.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
oov_buckets,
experimental_is_anonymous=is_anonymous)
@def_function.function(
input_signature=[tensor_spec.TensorSpec((), dtypes.int64)])
def lookup(key):
return root.table.lookup(key)
@def_function.function(input_signature=[])
def size():
return root.table.size()
@def_function.function(input_signature=[])
def is_ref_counting():
return test_ops.is_resource_handle_ref_counting(
root.table.resource_handle)
root.lookup = lookup
root.size = size
root.is_ref_counting = is_ref_counting
self.assertEqual(root.table.size(), 4)
self.assertEqual(root.lookup(12), 1)
self.assertEqual(root.lookup(10), 3)
self.assertEqual(root.is_ref_counting(), is_anonymous)
saved_model_save.save(root, save_path)
del root
loaded = saved_model_load.load(save_path)
self.assertEqual(loaded.size(), 4)
self.assertEqual(loaded.lookup(12), 1)
self.assertEqual(loaded.lookup(10), 3)
self.assertEqual(loaded.is_ref_counting(), is_anonymous)
@parameterized.named_parameters(
(f"_{is_anonymous}", is_anonymous) for is_anonymous in [False, True])
| StaticVocabularyTableTest |
python | keras-team__keras | keras/src/layers/reshaping/cropping3d_test.py | {
"start": 190,
"end": 7172
} | class ____(testing.TestCase):
@parameterized.product(
(
{"dim1_cropping": (1, 2), "dim1_expected": (1, 5)}, # both
{"dim1_cropping": (0, 2), "dim1_expected": (0, 5)}, # left only
{"dim1_cropping": (1, 0), "dim1_expected": (1, 7)}, # right only
),
(
{"dim2_cropping": (3, 4), "dim2_expected": (3, 5)}, # both
{"dim2_cropping": (0, 4), "dim2_expected": (0, 5)}, # left only
{"dim2_cropping": (3, 0), "dim2_expected": (3, 9)}, # right only
),
(
{"dim3_cropping": (5, 6), "dim3_expected": (5, 7)}, # both
{"dim3_cropping": (0, 6), "dim3_expected": (0, 7)}, # left only
{"dim3_cropping": (5, 0), "dim3_expected": (5, 13)}, # right only
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d(
self,
dim1_cropping,
dim2_cropping,
dim3_cropping,
data_format,
dim1_expected,
dim2_expected,
dim3_expected,
):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
dim1_expected[0] : dim1_expected[1],
dim2_expected[0] : dim2_expected[1],
dim3_expected[0] : dim3_expected[1],
:,
]
)
cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
@parameterized.product(
(
# same cropping values with 3 tuples
{
"cropping": ((2, 2), (2, 2), (2, 2)),
"expected": ((2, 5), (2, 7), (2, 11)),
},
# same cropping values with 1 tuple
{"cropping": (2, 2, 2), "expected": ((2, 5), (2, 7), (2, 11))},
# same cropping values with an integer
{"cropping": 2, "expected": ((2, 5), (2, 7), (2, 11))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
@pytest.mark.requires_trainable_backend
def test_cropping_3d_with_same_cropping(
self, cropping, data_format, expected
):
if data_format == "channels_first":
inputs = np.random.rand(3, 5, 7, 9, 13)
expected_output = ops.convert_to_tensor(
inputs[
:,
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
]
)
else:
inputs = np.random.rand(3, 7, 9, 13, 5)
expected_output = ops.convert_to_tensor(
inputs[
:,
expected[0][0] : expected[0][1],
expected[1][0] : expected[1][1],
expected[2][0] : expected[2][1],
:,
]
)
self.run_layer_test(
layers.Cropping3D,
init_kwargs={"cropping": cropping, "data_format": data_format},
input_data=inputs,
expected_output=expected_output,
)
def test_cropping_3d_with_dynamic_spatial_dim(self):
if backend.config.image_data_format() == "channels_last":
input_layer = layers.Input(batch_shape=(1, 7, None, 13, 5))
else:
input_layer = layers.Input(batch_shape=(1, 5, 7, None, 13))
cropped = layers.Cropping3D(((1, 2), (3, 4), (5, 6)))(input_layer)
if backend.config.image_data_format() == "channels_last":
self.assertEqual(cropped.shape, (1, 4, None, 2, 5))
else:
self.assertEqual(cropped.shape, (1, 5, 4, None, 2))
@parameterized.product(
(
{"cropping": ((3, 6), (0, 0), (0, 0))},
{"cropping": ((0, 0), (5, 8), (0, 0))},
{"cropping": ((0, 0), (0, 0), (7, 6))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_3d_errors_if_cropping_more_than_available(
self, cropping, data_format
):
input_layer = layers.Input(batch_shape=(3, 7, 9, 13, 5))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=cropping, data_format=data_format)(
input_layer
)
def test_cropping_3d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=(1, 2, 3, 4))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping="1")
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, 6, 7)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), (5, -6)))
with self.assertRaises(ValueError):
layers.Cropping3D(cropping=((1, 2), (3, 4), "5"))
@parameterized.product(
(
{"cropping": ((8, 1), (1, 1), (1, 1))},
{"cropping": ((1, 1), (10, 1), (1, 1))},
{"cropping": ((1, 1), (1, 1), (14, 1))},
),
(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
),
)
def test_cropping_3d_with_excessive_cropping(self, cropping, data_format):
if data_format == "channels_first":
shape = (3, 5, 7, 9, 13)
input_layer = layers.Input(batch_shape=shape)
else:
shape = (3, 7, 9, 13, 5)
input_layer = layers.Input(batch_shape=shape)
expected_error_msg = (
"Values in `cropping` argument should be smaller than the"
)
with self.assertRaisesRegex(ValueError, expected_error_msg):
layers.Cropping3D(cropping=cropping, data_format=data_format)(
input_layer
)
| Cropping3DTest |
python | ray-project__ray | python/ray/serve/config.py | {
"start": 24234,
"end": 27633
} | class ____(BaseModel):
"""HTTP options for the proxies. Supported fields:
- host: Host that the proxies listens for HTTP on. Defaults to
"127.0.0.1". To expose Serve publicly, you probably want to set
this to "0.0.0.0".
- port: Port that the proxies listen for HTTP on. Defaults to 8000.
- root_path: An optional root path to mount the serve application
(for example, "/prefix"). All deployment routes are prefixed
with this path.
- request_timeout_s: End-to-end timeout for HTTP requests.
- keep_alive_timeout_s: Duration to keep idle connections alive when no
requests are ongoing.
- ssl_keyfile: Path to the SSL key file for HTTPS. If provided with
ssl_certfile, the HTTP server will use HTTPS.
- ssl_certfile: Path to the SSL certificate file for HTTPS. If provided
with ssl_keyfile, the HTTP server will use HTTPS.
- ssl_keyfile_password: Optional password for the SSL key file.
- ssl_ca_certs: Optional path to CA certificate file for client certificate
verification.
- location: [DEPRECATED: use `proxy_location` field instead] The deployment
location of HTTP servers:
- "HeadOnly": start one HTTP server on the head node. Serve
assumes the head node is the node you executed serve.start
on. This is the default.
- "EveryNode": start one HTTP server per node.
- "NoServer": disable HTTP server.
- num_cpus: [DEPRECATED] The number of CPU cores to reserve for each
internal Serve HTTP proxy actor.
"""
host: Optional[str] = DEFAULT_HTTP_HOST
port: int = DEFAULT_HTTP_PORT
middlewares: List[Any] = []
location: Optional[DeploymentMode] = DeploymentMode.HeadOnly
num_cpus: int = 0
root_url: str = ""
root_path: str = ""
request_timeout_s: Optional[float] = None
keep_alive_timeout_s: int = DEFAULT_UVICORN_KEEP_ALIVE_TIMEOUT_S
ssl_keyfile: Optional[str] = None
ssl_certfile: Optional[str] = None
ssl_keyfile_password: Optional[str] = None
ssl_ca_certs: Optional[str] = None
@validator("location", always=True)
def location_backfill_no_server(cls, v, values):
if values["host"] is None or v is None:
return DeploymentMode.NoServer
return v
@validator("ssl_certfile")
def validate_ssl_certfile(cls, v, values):
ssl_keyfile = values.get("ssl_keyfile")
validate_ssl_config(v, ssl_keyfile)
return v
@validator("middlewares", always=True)
def warn_for_middlewares(cls, v, values):
if v:
warnings.warn(
"Passing `middlewares` to HTTPOptions is deprecated and will be "
"removed in a future version. Consider using the FastAPI integration "
"to configure middlewares on your deployments: "
"https://docs.ray.io/en/latest/serve/http-guide.html#fastapi-http-deployments" # noqa 501
)
return v
@validator("num_cpus", always=True)
def warn_for_num_cpus(cls, v, values):
if v:
warnings.warn(
"Passing `num_cpus` to HTTPOptions is deprecated and will be "
"removed in a future version."
)
return v
class Config:
validate_assignment = True
arbitrary_types_allowed = True
@PublicAPI(stability="alpha")
| HTTPOptions |
python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {
"start": 78108,
"end": 78713
} | class ____(fixtures.TestBase):
@testing.combinations(
(select(column("a")),),
(table("q", column("a")).insert(),),
(table("q", column("a")).update(),),
(table("q", column("a")).delete(),),
(lambda_stmt(lambda: select(column("a"))),),
)
def test_is_select(self, case):
if isinstance(case, LambdaElement):
resolved_case = case._resolved
else:
resolved_case = case
if isinstance(resolved_case, Select):
is_true(case.is_select)
else:
is_false(case.is_select)
| ExecutableFlagsTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 112589,
"end": 118584
} | class ____(test.TestCase):
def setUp(self):
self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
(0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
(0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
self._predictions_idx = (((9, 4, 6, 2, 0), (5, 7, 2, 9, 6)),
((5, 7, 2, 9, 6), (9, 4, 6, 2, 0)))
# Note: We don't test dense labels here, since examples have different
# numbers of labels.
self._labels = _binary_3d_label_to_sparse_value(((
(0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
(0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
self._test_recall_at_k = functools.partial(
_test_recall_at_k, test_case=self)
self._test_recall_at_top_k = functools.partial(
_test_recall_at_top_k, test_case=self)
@test_util.run_deprecated_v1
def test_3d_nan(self):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id)
@test_util.run_deprecated_v1
def test_3d_no_predictions(self):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0, class_id=class_id)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=0.0,
class_id=class_id)
@test_util.run_deprecated_v1
def test_3d(self):
# Class 2: 4 labels, all correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=4.0 / 4, class_id=2)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=4.0 / 4,
class_id=2)
# Class 5: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2, class_id=5)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2,
class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2, class_id=7)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 2,
class_id=7)
# All classes: 12 labels, 7 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=7.0 / 12)
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=7.0 / 12)
@test_util.run_deprecated_v1
def test_3d_ignore_all(self):
for class_id in range(10):
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
weights=[[0], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id, weights=[[0], [0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
class_id=class_id, weights=[[0, 0], [0, 0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
weights=[[0], [0]])
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN,
weights=[[0, 0], [0, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN,
weights=[[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def test_3d_ignore_some(self):
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2.0,
class_id=2, weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=2.0 / 2.0,
class_id=2, weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 1.0,
class_id=7, weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=0.0 / 1.0,
class_id=7, weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[1, 0], [1, 0]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=1.0 / 2.0,
class_id=7, weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_recall_at_k(
self._predictions, self._labels, k=5, expected=NAN, class_id=7,
weights=[[0, 1], [0, 1]])
self._test_recall_at_top_k(
self._predictions_idx, self._labels, k=5, expected=NAN, class_id=7,
weights=[[0, 1], [0, 1]])
| MultiLabel3dRecallAtKTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-font-to-fit-a-sentence-in-a-screen.py | {
"start": 403,
"end": 1216
} | class ____(object):
def maxFont(self, text, w, h, fonts, fontInfo):
"""
:type text: str
:type w: int
:type h: int
:type fonts: List[int]
:type fontInfo: FontInfo
:rtype: int
"""
def check(count, w, h, fonts, fontInfo, x): # Time: O(1)
return (fontInfo.getHeight(fonts[x]) <= h and
sum(cnt * fontInfo.getWidth(fonts[x], c) for c, cnt in count.iteritems()) <= w)
count = collections.Counter(text)
left, right = 0, len(fonts)-1
while left <= right:
mid = left + (right-left)//2
if not check(count, w, h, fonts, fontInfo, mid):
right = mid-1
else:
left = mid+1
return fonts[right] if right >= 0 else -1
| Solution |
python | pola-rs__polars | py-polars/src/polars/_typing.py | {
"start": 10105,
"end": 13856
} | class ____(BasicCursor):
def fetchall(self, *args: Any, **kwargs: Any) -> Any:
"""Fetch all results."""
def fetchmany(self, *args: Any, **kwargs: Any) -> Any:
"""Fetch results in batches."""
AlchemyConnection: TypeAlias = Union["Connection", "Engine", "Session"]
AlchemyAsyncConnection: TypeAlias = Union[
"AsyncConnection", "AsyncEngine", "AsyncSession"
]
ConnectionOrCursor: TypeAlias = Union[
BasicConnection, BasicCursor, Cursor, AlchemyConnection, AlchemyAsyncConnection
]
# Annotations for `__getitem__` methods
SingleIndexSelector: TypeAlias = int
MultiIndexSelector: TypeAlias = Union[
slice,
range,
Sequence[int],
"Series",
"np.ndarray[Any, Any]",
]
SingleNameSelector: TypeAlias = str
MultiNameSelector: TypeAlias = Union[
slice,
Sequence[str],
"Series",
"np.ndarray[Any, Any]",
]
BooleanMask: TypeAlias = Union[
Sequence[bool],
"Series",
"np.ndarray[Any, Any]",
]
SingleColSelector: TypeAlias = Union[SingleIndexSelector, SingleNameSelector]
MultiColSelector: TypeAlias = Union[MultiIndexSelector, MultiNameSelector, BooleanMask]
# LazyFrame engine selection
EngineType: TypeAlias = Union[
Literal["auto", "in-memory", "streaming", "gpu"], "GPUEngine"
]
PlanStage: TypeAlias = Literal["ir", "physical"]
FileSource: TypeAlias = Union[
str,
Path,
IO[bytes],
bytes,
list[str],
list[Path],
list[IO[bytes]],
list[bytes],
]
JSONEncoder = Union[Callable[[Any], bytes], Callable[[Any], str]]
DeprecationType: TypeAlias = Literal[
"function",
"renamed_parameter",
"streaming_parameter",
"nonkeyword_arguments",
"parameter_as_multi_positional",
]
__all__ = [
"Ambiguous",
"ArrowArrayExportable",
"ArrowStreamExportable",
"AsofJoinStrategy",
"AvroCompression",
"BooleanMask",
"BufferInfo",
"CategoricalOrdering",
"ClosedInterval",
"ColumnFormatDict",
"ColumnNameOrSelector",
"ColumnTotalsDefinition",
"ColumnWidthsDefinition",
"ComparisonOperator",
"ConcatMethod",
"ConditionalFormatDict",
"ConnectionOrCursor",
"CorrelationMethod",
"CsvEncoding",
"CsvQuoteStyle",
"Cursor",
"DbReadEngine",
"DbWriteEngine",
"DbWriteMode",
"DeprecationType",
"Endianness",
"EngineType",
"EpochTimeUnit",
"ExcelSpreadsheetEngine",
"ExplainFormat",
"FileSource",
"FillNullStrategy",
"FloatFmt",
"FrameInitTypes",
"FrameType",
"IndexOrder",
"InterpolationMethod",
"IntoExpr",
"IntoExprColumn",
"IpcCompression",
"JSONEncoder",
"JaxExportType",
"JoinStrategy",
"JoinValidation",
"Label",
"ListToStructWidthStrategy",
"MaintainOrderJoin",
"MapElementsStrategy",
"MultiColSelector",
"MultiIndexSelector",
"MultiNameSelector",
"NonExistent",
"NonNestedLiteral",
"NullBehavior",
"NumericLiteral",
"OneOrMoreDataTypes",
"Orientation",
"ParallelStrategy",
"ParametricProfileNames",
"ParquetCompression",
"PivotAgg",
"PolarsDataType",
"PolarsIntegerType",
"PolarsTemporalType",
"PolarsType",
"PythonDataType",
"PythonLiteral",
"QuantileMethod",
"RankMethod",
"Roll",
"RowTotalsDefinition",
"SchemaDefinition",
"SchemaDict",
"SearchSortedSide",
"SelectorType",
"SerializationFormat",
"SeriesBuffers",
"SingleColSelector",
"SingleIndexSelector",
"SingleNameSelector",
"SizeUnit",
"StartBy",
"SyncOnCloseMethod",
"TemporalLiteral",
"TimeUnit",
"TorchExportType",
"TransferEncoding",
"UnicodeForm",
"UniqueKeepStrategy",
"UnstackDirection",
"WindowMappingStrategy",
]
| Cursor |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_no_doc_rgx_test_all.py | {
"start": 732,
"end": 867
} | class ____:
def __init__(self, my_param: int) -> None: # [missing-param-doc]
"""
My init docstring
"""
| MyClass |
python | ray-project__ray | python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py | {
"start": 763,
"end": 3232
} | class ____(unittest.TestCase):
"""Test GpuUtilizationInfo TypedDict."""
def test_creation_with_processes(self):
"""Test GpuUtilizationInfo with process information."""
process1 = ProcessGPUInfo(pid=1234, gpu_memory_usage=256, gpu_utilization=None)
process2 = ProcessGPUInfo(pid=5678, gpu_memory_usage=512, gpu_utilization=None)
gpu_info = GpuUtilizationInfo(
index=0,
name="NVIDIA GeForce RTX 3080",
uuid="GPU-12345678-1234-1234-1234-123456789abc",
utilization_gpu=75,
memory_used=8192,
memory_total=10240,
processes_pids={1234: process1, 5678: process2},
)
self.assertEqual(gpu_info["index"], 0)
self.assertEqual(gpu_info["name"], "NVIDIA GeForce RTX 3080")
self.assertEqual(gpu_info["uuid"], "GPU-12345678-1234-1234-1234-123456789abc")
self.assertEqual(gpu_info["utilization_gpu"], 75)
self.assertEqual(gpu_info["memory_used"], 8192)
self.assertEqual(gpu_info["memory_total"], 10240)
self.assertEqual(len(gpu_info["processes_pids"]), 2)
self.assertIn(1234, gpu_info["processes_pids"])
self.assertIn(5678, gpu_info["processes_pids"])
self.assertEqual(gpu_info["processes_pids"][1234]["pid"], 1234)
self.assertEqual(gpu_info["processes_pids"][1234]["gpu_memory_usage"], 256)
self.assertEqual(gpu_info["processes_pids"][5678]["pid"], 5678)
self.assertEqual(gpu_info["processes_pids"][5678]["gpu_memory_usage"], 512)
def test_creation_without_processes(self):
"""Test GpuUtilizationInfo without process information."""
gpu_info = GpuUtilizationInfo(
index=1,
name="AMD Radeon RX 6800 XT",
uuid="GPU-87654321-4321-4321-4321-ba9876543210",
utilization_gpu=None,
memory_used=4096,
memory_total=16384,
processes_pids=None,
)
self.assertEqual(gpu_info["index"], 1)
self.assertEqual(gpu_info["name"], "AMD Radeon RX 6800 XT")
self.assertEqual(gpu_info["uuid"], "GPU-87654321-4321-4321-4321-ba9876543210")
self.assertIsNone(gpu_info["utilization_gpu"]) # Should be None, not -1
self.assertEqual(gpu_info["memory_used"], 4096)
self.assertEqual(gpu_info["memory_total"], 16384)
self.assertIsNone(gpu_info["processes_pids"]) # Should be None, not []
| TestGpuUtilizationInfo |
python | astropy__astropy | astropy/utils/metadata/exceptions.py | {
"start": 265,
"end": 318
} | class ____(AstropyWarning):
pass
| MergeConflictWarning |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-okta/source_okta/config_migration.py | {
"start": 419,
"end": 3109
} | class ____:
"""
This class stands for migrating the config at runtime,
while providing the backward compatibility when falling back to the previous source version.
"""
message_repository: MessageRepository = InMemoryMessageRepository()
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""
Uses the presence of the `domain` field to know if the config should be migrated.
Returns:
> True, if the transformation is necessary
> False, otherwise.
> Raises the Exception if the structure could not be migrated.
"""
return "domain" not in config
@classmethod
def modify(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
config["domain"] = config["base_url"].split("https://")[1].split(".")[0]
if "credentials" not in config:
if "token" in config:
config["credentials"] = {
"auth_type": "api_token",
"api_token": config["token"],
}
else:
raise ValueError(f"Invalid config. got {config}")
return config
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
# modify the config
migrated_config = cls.modify(config)
# save the config
source.write_config(migrated_config, config_path)
# return modified config
return migrated_config
@classmethod
def emit_control_message(cls, migrated_config: Mapping[str, Any]) -> None:
# add the Airbyte Control Message to message repo
cls.message_repository.emit_message(create_connector_config_control_message(migrated_config))
# emit the Airbyte Control Message from message queue to stdout
for message in cls.message_repository._message_queue:
print(message.json(exclude_unset=True))
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
This method checks the input args, should the config be migrated,
transform if necessary and emit the CONTROL message.
"""
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls.should_migrate(config):
cls.emit_control_message(
cls.modify_and_save(config_path, source, config),
)
| OktaConfigMigration |
python | apache__airflow | providers/alibaba/src/airflow/providers/alibaba/cloud/sensors/analyticdb_spark.py | {
"start": 1168,
"end": 2325
} | class ____(BaseSensorOperator):
"""
Monitor a AnalyticDB Spark session for termination.
:param app_id: identifier of the monitored app depends on the option that's being modified.
:param adb_spark_conn_id: reference to a pre-defined ADB Spark connection.
:param region: AnalyticDB MySQL region you want to submit spark application.
"""
template_fields: Sequence[str] = ("app_id",)
def __init__(
self,
*,
app_id: str,
adb_spark_conn_id: str = "adb_spark_default",
region: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.app_id = app_id
self._region = region
self._adb_spark_conn_id = adb_spark_conn_id
@cached_property
def hook(self) -> AnalyticDBSparkHook:
"""Get valid hook."""
return AnalyticDBSparkHook(adb_spark_conn_id=self._adb_spark_conn_id, region=self._region)
def poke(self, context: Context) -> bool:
app_id = self.app_id
state = self.hook.get_spark_state(app_id)
return AppState(state) in AnalyticDBSparkHook.TERMINAL_STATES
| AnalyticDBSparkSensor |
python | huggingface__transformers | src/transformers/models/llava_next/modeling_llava_next.py | {
"start": 6978,
"end": 8551
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size (batch_size * num_patches, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
# Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext
| LlavaNextCausalLMOutputWithPast |
python | neetcode-gh__leetcode | python/0740-delete-and-earn.py | {
"start": 68,
"end": 506
} | class ____(object):
def deleteAndEarn(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
upperLimit = max(nums) + 1
store = [0] * upperLimit
for num in nums:
store[num] += num
dp = [0] * upperLimit
dp[1] = 1 * store[1]
for i in range(2, upperLimit):
dp[i] = max(dp[i - 2] + store[i], dp[i - 1])
return dp[-1] | Solution |
python | getsentry__sentry | src/sentry/tasks/summaries/metrics.py | {
"start": 768,
"end": 1230
} | class ____(EventLifecycleMetric):
operation_type: WeeklyReportOperationType
dry_run: bool
def get_metric_key(self, outcome: EventLifecycleOutcome) -> str:
tokens = ("weekly_report", self.operation_type, str(outcome))
return ".".join(tokens)
def get_metric_tags(self) -> Mapping[str, str]:
return {
"operation_type": self.operation_type,
"dry_run": str(self.dry_run).lower(),
}
| WeeklyReportSLO |
python | huggingface__transformers | src/transformers/models/metaclip_2/modular_metaclip_2.py | {
"start": 33176,
"end": 33576
} | class ____(CLIPForImageClassification):
pass
__all__ = [
"MetaClip2Config",
"MetaClip2TextConfig",
"MetaClip2VisionConfig",
"MetaClip2Model",
"MetaClip2PreTrainedModel",
"MetaClip2TextModel",
"MetaClip2TextModelWithProjection",
"MetaClip2VisionModel",
"MetaClip2VisionModelWithProjection",
"MetaClip2ForImageClassification",
]
| MetaClip2ForImageClassification |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E21.py | {
"start": 195,
"end": 620
} | class ____ (Bar, Baz):
pass
def fetch_name () -> Union[str, None]:
"""Fetch name from --person-name in sys.argv.
Returns:
name of the person if available, otherwise None
"""
test = len(5)
Logger.info(test)
# test commented code
# Logger.info("test code")
for i in range (0, len (sys.argv)) :
if sys.argv[i] == "--name" :
return sys.argv[i + 1]
return None
| Foo |
python | getsentry__sentry | src/sentry/codecov/endpoints/sync_repos/sync_repos.py | {
"start": 1109,
"end": 3349
} | class ____(CodecovEndpoint):
owner = ApiOwner.CODECOV
publish_status = {
"POST": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
}
permission_classes = (SyncReposPermission,)
@extend_schema(
operation_id="Syncs repositories from an integrated org with GitHub",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
PreventParams.OWNER,
],
request=None,
responses={
200: SyncReposSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def post(self, request: Request, owner: RpcIntegration, **kwargs) -> Response:
"""
Syncs repositories for an integrated organization with GitHub.
"""
owner_slug = owner.name
variables: dict[str, Any] = {}
client = CodecovApiClient(git_provider_org=owner_slug)
graphql_response = client.query(query=mutation, variables=variables)
serializer = SyncReposSerializer(context={"http_method": request.method})
is_syncing = serializer.to_representation(graphql_response.json())
return Response(is_syncing)
@extend_schema(
operation_id="Gets syncing status for repositories for an integrated org",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
PreventParams.OWNER,
],
request=None,
responses={
200: SyncReposSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, owner: RpcIntegration, **kwargs) -> Response:
"""
Gets syncing status for repositories for an integrated organization.
"""
owner_slug = owner.name
variables: dict[str, Any] = {}
client = CodecovApiClient(git_provider_org=owner_slug)
graphql_response = client.query(query=query, variables=variables)
serializer = SyncReposSerializer(context={"http_method": request.method})
is_syncing = serializer.to_representation(graphql_response.json())
return Response(is_syncing)
| SyncReposEndpoint |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 2216,
"end": 2315
} | class ____(floating):
name = "float16"
typecode = "e"
torch_dtype = torch.float16
| float16 |
python | optuna__optuna | optuna/testing/pruners.py | {
"start": 52,
"end": 318
} | class ____(optuna.pruners.BasePruner):
def __init__(self, is_pruning: bool) -> None:
self.is_pruning = is_pruning
def prune(self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial") -> bool:
return self.is_pruning
| DeterministicPruner |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/integration/test_vendor_orders_status.py | {
"start": 2075,
"end": 6189
} | class ____:
@staticmethod
def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return read_output(
config_builder=config_,
stream_name=_STREAM_NAME,
sync_mode=SyncMode.full_refresh,
expecting_exception=expecting_exception,
)
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_orders_status_request().build(), _vendor_orders_status_response().with_record(_order_status_record()).build()
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_two_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_orders_status_request().build(),
_vendor_orders_status_response().with_pagination().with_record(_order_status_record()).build(),
)
query_params_with_next_page_token = {
_REPLICATION_START_FIELD: _START_DATE.strftime(TIME_FORMAT),
_REPLICATION_END_FIELD: _END_DATE.strftime(TIME_FORMAT),
"nextToken": NEXT_TOKEN_STRING,
}
http_mocker.get(
_vendor_orders_status_request().with_query_params(query_params_with_next_page_token).build(),
_vendor_orders_status_response().with_record(_order_status_record()).with_record(_order_status_record()).build(),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_two_slices_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
end_date = _START_DATE.add(days=8)
mock_auth(http_mocker)
query_params_first_slice = {
_REPLICATION_START_FIELD: _START_DATE.strftime(TIME_FORMAT),
_REPLICATION_END_FIELD: _START_DATE.add(days=7).strftime(TIME_FORMAT),
}
http_mocker.get(
_vendor_orders_status_request().with_query_params(query_params_first_slice).build(),
_vendor_orders_status_response().with_record(_order_status_record()).build(),
)
query_params_second_slice = {
_REPLICATION_START_FIELD: query_params_first_slice[_REPLICATION_END_FIELD],
_REPLICATION_END_FIELD: end_date.strftime(TIME_FORMAT),
}
http_mocker.get(
_vendor_orders_status_request().with_query_params(query_params_second_slice).build(),
_vendor_orders_status_response().with_record(_order_status_record()).build(),
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(end_date))
assert len(output.records) == 2
@HttpMocker()
def test_given_http_status_500_then_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(
_vendor_orders_status_request().build(),
[
response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR),
_vendor_orders_status_response().with_record(_order_status_record()).build(),
],
)
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_on_availability_when_read_then_raise_system_error(self, http_mocker: HttpMocker) -> None:
mock_auth(http_mocker)
http_mocker.get(_vendor_orders_status_request().build(), response_with_status(status_code=HTTPStatus.INTERNAL_SERVER_ERROR))
output = self._read(config().with_start_date(_START_DATE).with_end_date(_END_DATE), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | modin-project__modin | modin/core/execution/unidist/generic/partitioning/partition_manager.py | {
"start": 1053,
"end": 2293
} | class ____(PandasDataframePartitionManager):
"""The class implements the interface in `PandasDataframePartitionManager`."""
@classmethod
def to_numpy(cls, partitions, **kwargs):
"""
Convert `partitions` into a NumPy array.
Parameters
----------
partitions : NumPy array
A 2-D array of partitions to convert to local NumPy array.
**kwargs : dict
Keyword arguments to pass to each partition ``.to_numpy()`` call.
Returns
-------
NumPy array
"""
if partitions.shape[1] == 1:
parts = cls.get_objects_from_partitions(partitions.flatten())
parts = [part.to_numpy(**kwargs) for part in parts]
else:
parts = UnidistWrapper.materialize(
[
obj.apply(
lambda df, **kwargs: df.to_numpy(**kwargs)
).list_of_blocks[0]
for row in partitions
for obj in row
]
)
rows, cols = partitions.shape
parts = [parts[i * cols : (i + 1) * cols] for i in range(rows)]
return np.block(parts)
| GenericUnidistDataframePartitionManager |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py | {
"start": 27432,
"end": 31871
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
"""
def __init__(self, config: Qwen2_5_VLTextConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.rope_parameters = config.rope_parameters
self.scaling = self.head_dim**-0.5
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb(
query_states, key_states, cos, sin, self.config.rope_parameters["mrope_section"]
)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window,
position_ids=position_ids, # pass positions for FA2
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Qwen2_5_VLAttention |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_config_utils.py | {
"start": 1692,
"end": 16754
} | class ____(str, Enum):
INTEGER = "integer"
FLOAT = "float"
DATE = "date"
TIME = "time"
DATETIME = "datetime"
BOOLEAN = "boolean"
STRING = "string"
TIMEDELTA = "timedelta"
PERIOD = "period"
INTERVAL = "interval"
BYTES = "bytes"
DECIMAL = "decimal"
COMPLEX = "complex"
LIST = "list"
DICT = "dict"
EMPTY = "empty"
UNKNOWN = "unknown"
# The dataframe schema is a mapping from the name of the column
# in the underlying dataframe to the column data kind.
# The index column uses `_index` as name.
DataframeSchema: TypeAlias = dict[str, ColumnDataKind]
# This mapping contains all editable column types mapped to the data kinds
# that the column type is compatible for editing.
_EDITING_COMPATIBILITY_MAPPING: Final[dict[ColumnType, list[ColumnDataKind]]] = {
"text": [ColumnDataKind.STRING, ColumnDataKind.EMPTY],
"number": [
ColumnDataKind.INTEGER,
ColumnDataKind.FLOAT,
ColumnDataKind.DECIMAL,
ColumnDataKind.STRING,
ColumnDataKind.TIMEDELTA,
ColumnDataKind.EMPTY,
],
"checkbox": [
ColumnDataKind.BOOLEAN,
ColumnDataKind.STRING,
ColumnDataKind.INTEGER,
ColumnDataKind.EMPTY,
],
"selectbox": [
ColumnDataKind.STRING,
ColumnDataKind.BOOLEAN,
ColumnDataKind.INTEGER,
ColumnDataKind.FLOAT,
ColumnDataKind.EMPTY,
],
"date": [ColumnDataKind.DATE, ColumnDataKind.DATETIME, ColumnDataKind.EMPTY],
"time": [ColumnDataKind.TIME, ColumnDataKind.DATETIME, ColumnDataKind.EMPTY],
"datetime": [
ColumnDataKind.DATETIME,
ColumnDataKind.DATE,
ColumnDataKind.TIME,
ColumnDataKind.EMPTY,
],
"link": [ColumnDataKind.STRING, ColumnDataKind.EMPTY],
"list": [
ColumnDataKind.LIST,
ColumnDataKind.STRING,
ColumnDataKind.EMPTY,
],
"multiselect": [
ColumnDataKind.LIST,
ColumnDataKind.STRING,
ColumnDataKind.EMPTY,
],
}
def is_type_compatible(column_type: ColumnType, data_kind: ColumnDataKind) -> bool:
"""Check if the column type is compatible with the underlying data kind.
This check only applies to editable column types (e.g. number or text).
Non-editable column types (e.g. bar_chart or image) can be configured for
all data kinds (this might change in the future).
Parameters
----------
column_type : ColumnType
The column type to check.
data_kind : ColumnDataKind
The data kind to check.
Returns
-------
bool
True if the column type is compatible with the data kind, False otherwise.
"""
if column_type not in _EDITING_COMPATIBILITY_MAPPING:
return True
return data_kind in _EDITING_COMPATIBILITY_MAPPING[column_type]
def _determine_data_kind_via_arrow(field: pa.Field) -> ColumnDataKind:
"""Determine the data kind via the arrow type information.
The column data kind refers to the shared data type of the values
in the column (e.g. int, float, str, bool).
Parameters
----------
field : pa.Field
The arrow field from the arrow table schema.
Returns
-------
ColumnDataKind
The data kind of the field.
"""
import pyarrow as pa
field_type = field.type
if pa.types.is_integer(field_type):
return ColumnDataKind.INTEGER
if pa.types.is_floating(field_type):
return ColumnDataKind.FLOAT
if pa.types.is_boolean(field_type):
return ColumnDataKind.BOOLEAN
if pa.types.is_string(field_type):
return ColumnDataKind.STRING
if pa.types.is_date(field_type):
return ColumnDataKind.DATE
if pa.types.is_time(field_type):
return ColumnDataKind.TIME
if pa.types.is_timestamp(field_type):
return ColumnDataKind.DATETIME
if pa.types.is_duration(field_type):
return ColumnDataKind.TIMEDELTA
if pa.types.is_list(field_type):
return ColumnDataKind.LIST
if pa.types.is_decimal(field_type):
return ColumnDataKind.DECIMAL
if pa.types.is_null(field_type):
return ColumnDataKind.EMPTY
# Interval does not seem to work correctly:
# if pa.types.is_interval(field_type):
# return ColumnDataKind.INTERVAL # noqa: ERA001
if pa.types.is_binary(field_type):
return ColumnDataKind.BYTES
if pa.types.is_struct(field_type):
return ColumnDataKind.DICT
return ColumnDataKind.UNKNOWN
def _determine_data_kind_via_pandas_dtype(
column: Series[Any] | Index[Any],
) -> ColumnDataKind:
"""Determine the data kind by using the pandas dtype.
The column data kind refers to the shared data type of the values
in the column (e.g. int, float, str, bool).
Parameters
----------
column : pd.Series, pd.Index
The column for which the data kind should be determined.
Returns
-------
ColumnDataKind
The data kind of the column.
"""
import pandas as pd
column_dtype = column.dtype
if pd.api.types.is_bool_dtype(column_dtype):
return ColumnDataKind.BOOLEAN
if pd.api.types.is_integer_dtype(column_dtype):
return ColumnDataKind.INTEGER
if pd.api.types.is_float_dtype(column_dtype):
return ColumnDataKind.FLOAT
if pd.api.types.is_datetime64_any_dtype(column_dtype):
return ColumnDataKind.DATETIME
if pd.api.types.is_timedelta64_dtype(column_dtype):
return ColumnDataKind.TIMEDELTA
if isinstance(column_dtype, pd.PeriodDtype):
return ColumnDataKind.PERIOD
if isinstance(column_dtype, pd.IntervalDtype):
return ColumnDataKind.INTERVAL
if pd.api.types.is_complex_dtype(column_dtype):
return ColumnDataKind.COMPLEX
if pd.api.types.is_object_dtype(
column_dtype
) is False and pd.api.types.is_string_dtype(column_dtype):
# The is_string_dtype
return ColumnDataKind.STRING
return ColumnDataKind.UNKNOWN
def _determine_data_kind_via_inferred_type(
column: Series[Any] | Index[Any],
) -> ColumnDataKind:
"""Determine the data kind by inferring it from the underlying data.
The column data kind refers to the shared data type of the values
in the column (e.g. int, float, str, bool).
Parameters
----------
column : pd.Series, pd.Index
The column to determine the data kind for.
Returns
-------
ColumnDataKind
The data kind of the column.
"""
from pandas.api.types import infer_dtype
inferred_type = infer_dtype(column)
if inferred_type == "string":
return ColumnDataKind.STRING
if inferred_type == "bytes":
return ColumnDataKind.BYTES
if inferred_type in ["floating", "mixed-integer-float"]:
return ColumnDataKind.FLOAT
if inferred_type == "integer":
return ColumnDataKind.INTEGER
if inferred_type == "decimal":
return ColumnDataKind.DECIMAL
if inferred_type == "complex":
return ColumnDataKind.COMPLEX
if inferred_type == "boolean":
return ColumnDataKind.BOOLEAN
if inferred_type in ["datetime64", "datetime"]:
return ColumnDataKind.DATETIME
if inferred_type == "date":
return ColumnDataKind.DATE
if inferred_type in ["timedelta64", "timedelta"]:
return ColumnDataKind.TIMEDELTA
if inferred_type == "time":
return ColumnDataKind.TIME
if inferred_type == "period":
return ColumnDataKind.PERIOD
if inferred_type == "interval":
return ColumnDataKind.INTERVAL
if inferred_type == "empty":
return ColumnDataKind.EMPTY
# Unused types: mixed, unknown-array, categorical, mixed-integer
return ColumnDataKind.UNKNOWN
def _determine_data_kind(
column: Series[Any] | Index[Any], field: pa.Field | None = None
) -> ColumnDataKind:
"""Determine the data kind of a column.
The column data kind refers to the shared data type of the values
in the column (e.g. int, float, str, bool).
Parameters
----------
column : pd.Series, pd.Index
The column to determine the data kind for.
field : pa.Field, optional
The arrow field from the arrow table schema.
Returns
-------
ColumnDataKind
The data kind of the column.
"""
import pandas as pd
if isinstance(column.dtype, pd.CategoricalDtype):
# Categorical columns can have different underlying data kinds
# depending on the categories.
return _determine_data_kind_via_inferred_type(column.dtype.categories)
if field is not None:
data_kind = _determine_data_kind_via_arrow(field)
if data_kind != ColumnDataKind.UNKNOWN:
return data_kind
if column.dtype.name == "object":
# If dtype is object, we need to infer the type from the column
return _determine_data_kind_via_inferred_type(column)
return _determine_data_kind_via_pandas_dtype(column)
def determine_dataframe_schema(
data_df: DataFrame, arrow_schema: pa.Schema
) -> DataframeSchema:
"""Determine the schema of a dataframe.
Parameters
----------
data_df : pd.DataFrame
The dataframe to determine the schema of.
arrow_schema : pa.Schema
The Arrow schema of the dataframe.
Returns
-------
DataframeSchema
A mapping that contains the detected data type for the index and columns.
The key is the column name in the underlying dataframe or ``_index`` for index columns.
"""
dataframe_schema: DataframeSchema = {}
# Add type of index:
# TODO(lukasmasuch): We need to apply changes here to support multiindex.
dataframe_schema[INDEX_IDENTIFIER] = _determine_data_kind(data_df.index)
# Add types for all columns:
for i, column in enumerate(data_df.items()):
column_name = str(column[0])
column_data = column[1]
dataframe_schema[column_name] = _determine_data_kind(
column_data, arrow_schema.field(i)
)
return dataframe_schema
# A mapping of column names/IDs to column configs.
ColumnConfigMapping: TypeAlias = dict[IndexIdentifierType | str | int, ColumnConfig]
ColumnConfigMappingInput: TypeAlias = Mapping[
# TODO(lukasmasuch): This should also use int here to
# correctly type the support for positional index. However,
# allowing int here leads mypy to complain about simple dict[str, ...]
# as input -> which seems like a mypy bug.
IndexIdentifierType | str,
ColumnConfig | None | str,
]
def process_config_mapping(
column_config: ColumnConfigMappingInput | None = None,
) -> ColumnConfigMapping:
"""Transforms a user-provided column config mapping into a valid column config mapping
that can be used by the frontend.
Parameters
----------
column_config: dict or None
The user-provided column config mapping.
Returns
-------
dict
The transformed column config mapping.
"""
if column_config is None:
return {}
transformed_column_config: ColumnConfigMapping = {}
for column, config in column_config.items():
if config is None:
transformed_column_config[column] = ColumnConfig(hidden=True)
elif isinstance(config, str):
transformed_column_config[column] = ColumnConfig(label=config)
elif isinstance(config, dict):
# Ensure that the column config objects are cloned
# since we will apply in-place changes to it.
transformed_column_config[column] = copy.deepcopy(config)
else:
raise StreamlitAPIException(
f"Invalid column config for column `{column}`. "
f"Expected `None`, `str` or `dict`, but got `{type(config)}`."
)
return transformed_column_config
def update_column_config(
column_config_mapping: ColumnConfigMapping,
column: str | int,
column_config: ColumnConfig,
) -> None:
"""Updates the column config value for a single column within the mapping.
Parameters
----------
column_config_mapping : ColumnConfigMapping
The column config mapping to update.
column : str | int
The column to update the config value for. This can be the column name or
the numerical position of the column.
column_config : ColumnConfig
The column config to update.
"""
if column not in column_config_mapping:
column_config_mapping[column] = {}
column_config_mapping[column].update(column_config)
def apply_data_specific_configs(
columns_config: ColumnConfigMapping,
data_format: DataFormat,
) -> None:
"""Apply data specific configurations to the provided dataframe.
This will apply inplace changes to the dataframe and the column configurations
depending on the data format.
Parameters
----------
columns_config : ColumnConfigMapping
A mapping of column names/ids to column configurations.
data_format : DataFormat
The format of the data.
"""
# Pandas adds a range index as default to all datastructures
# but for most of the non-pandas data objects it is unnecessary
# to show this index to the user. Therefore, we will hide it as default.
if data_format in [
DataFormat.SET_OF_VALUES,
DataFormat.TUPLE_OF_VALUES,
DataFormat.LIST_OF_VALUES,
DataFormat.NUMPY_LIST,
DataFormat.NUMPY_MATRIX,
DataFormat.LIST_OF_RECORDS,
DataFormat.LIST_OF_ROWS,
DataFormat.COLUMN_VALUE_MAPPING,
# Dataframe-like objects that don't have an index:
DataFormat.PANDAS_ARRAY,
DataFormat.PANDAS_INDEX,
DataFormat.POLARS_DATAFRAME,
DataFormat.POLARS_SERIES,
DataFormat.POLARS_LAZYFRAME,
DataFormat.PYARROW_ARRAY,
DataFormat.RAY_DATASET,
]:
update_column_config(columns_config, INDEX_IDENTIFIER, {"hidden": True})
def _convert_column_config_to_json(column_config_mapping: ColumnConfigMapping) -> str:
try:
# Ignore all None values and prefix columns specified by numerical index:
return json.dumps(
{
(f"{_NUMERICAL_POSITION_PREFIX}{k!s}" if isinstance(k, int) else k): v
for (k, v) in remove_none_values(column_config_mapping).items()
},
allow_nan=False,
)
except ValueError as ex:
raise StreamlitAPIException(
f"The provided column config cannot be serialized into JSON: {ex}"
) from ex
def marshall_column_config(
proto: ArrowProto, column_config_mapping: ColumnConfigMapping
) -> None:
"""Marshall the column config into the Arrow proto.
Parameters
----------
proto : ArrowProto
The proto to marshall into.
column_config_mapping : ColumnConfigMapping
The column config to marshall.
"""
proto.columns = _convert_column_config_to_json(column_config_mapping)
| ColumnDataKind |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_endpoints/test_user_endpoint.py | {
"start": 2860,
"end": 4153
} | class ____:
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app, request) -> None:
self.app = configured_app
self.client = self.app.test_client()
self.session = self.app.appbuilder.session
# Logout the user after each request
@request.addfinalizer
def logout():
with configured_app.test_request_context():
logout_user()
def teardown_method(self) -> None:
# Delete users that have our custom default time
self.session.execute(delete(User).where(User.changed_on == timezone.parse(DEFAULT_TIME)))
self.session.commit()
def _create_users(self, count, roles=None):
# create users with defined created_on and changed_on date
# for easy testing
if roles is None:
roles = []
return [
User(
first_name=f"test{i}",
last_name=f"test{i}",
username=f"TEST_USER{i}",
email=f"mytest@test{i}.org",
roles=roles or [],
created_on=timezone.parse(DEFAULT_TIME),
changed_on=timezone.parse(DEFAULT_TIME),
active=True,
)
for i in range(1, count + 1)
]
| TestUserEndpoint |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/callback.py | {
"start": 1045,
"end": 4801
} | class ____(ABC):
"""
Base class for Deadline Alert callbacks.
Callbacks are used to execute custom logic when a deadline is missed.
The `callback_callable` can be a Python callable type or a string containing the path to the callable that
can be used to import the callable. It must be a top-level callable in a module present on the host where
it will run.
It will be called with Airflow context and specified kwargs when a deadline is missed.
"""
path: str
kwargs: dict
def __init__(self, callback_callable: Callable | str, kwargs: dict[str, Any] | None = None):
self.path = self.get_callback_path(callback_callable)
if kwargs and "context" in kwargs:
raise ValueError("context is a reserved kwarg for this class")
self.kwargs = kwargs or {}
@classmethod
def get_callback_path(cls, _callback: str | Callable) -> str:
"""Convert callback to a string path that can be used to import it later."""
if callable(_callback):
cls.verify_callable(_callback)
# TODO: This implementation doesn't support using a lambda function as a callback.
# We should consider that in the future, but the addition is non-trivial.
# Get the reference path to the callable in the form `airflow.models.deadline.get_from_db`
return f"{_callback.__module__}.{_callback.__qualname__}"
if not isinstance(_callback, str) or not is_valid_dotpath(_callback.strip()):
raise ImportError(f"`{_callback}` doesn't look like a valid dot path.")
stripped_callback = _callback.strip()
try:
# The provided callback is a string which appears to be a valid dotpath, attempt to import it.
callback = import_string(stripped_callback)
if not callable(callback):
# The input is a string which can be imported, but is not callable.
raise AttributeError(f"Provided callback {callback} is not callable.")
cls.verify_callable(callback)
except ImportError as e:
# Logging here instead of failing because it is possible that the code for the callable
# exists somewhere other than on the DAG processor. We are making a best effort to validate,
# but can't rule out that it may be available at runtime even if it can not be imported here.
log.debug(
"Callback %s is formatted like a callable dotpath, but could not be imported.\n%s",
stripped_callback,
e,
)
return stripped_callback
@classmethod
def verify_callable(cls, callback: Callable):
"""For additional verification of the callable during initialization in subclasses."""
pass # No verification needed in the base class
@classmethod
def deserialize(cls, data: dict, version):
path = data.pop("path")
return cls(callback_callable=path, **data)
@classmethod
def serialized_fields(cls) -> tuple[str, ...]:
return ("path", "kwargs")
def serialize(self) -> dict[str, Any]:
return {f: getattr(self, f) for f in self.serialized_fields()}
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.serialize() == other.serialize()
def __hash__(self):
serialized = self.serialize()
hashable_items = []
for k, v in serialized.items():
if isinstance(v, dict):
hashable_items.append((k, tuple(sorted(v.items()))))
else:
hashable_items.append((k, v))
return hash(tuple(sorted(hashable_items)))
| Callback |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_details.py | {
"start": 1102,
"end": 2049
} | class ____(serializers.Serializer[ReleaseThresholdPUTData]):
threshold_type = serializers.ChoiceField(choices=ReleaseThresholdType.as_str_choices())
trigger_type = serializers.ChoiceField(choices=ReleaseThresholdTriggerType.as_str_choices())
value = serializers.IntegerField(required=True, min_value=0)
window_in_seconds = serializers.IntegerField(required=True, min_value=0)
def validate_threshold_type(self, threshold_type: str) -> int:
if threshold_type not in THRESHOLD_TYPE_STR_TO_INT:
raise serializers.ValidationError("Invalid threshold type")
return THRESHOLD_TYPE_STR_TO_INT[threshold_type]
def validate_trigger_type(self, trigger_type: str) -> int:
if trigger_type not in TRIGGER_TYPE_STRING_TO_INT:
raise serializers.ValidationError("Invalid trigger type")
return TRIGGER_TYPE_STRING_TO_INT[trigger_type]
@region_silo_endpoint
| ReleaseThresholdPUTSerializer |
python | doocs__leetcode | solution/0000-0099/0053.Maximum Subarray/Solution.py | {
"start": 0,
"end": 199
} | class ____:
def maxSubArray(self, nums: List[int]) -> int:
ans = f = nums[0]
for x in nums[1:]:
f = max(f, 0) + x
ans = max(ans, f)
return ans
| Solution |
python | lazyprogrammer__machine_learning_examples | ann_class2/dropout_tensorflow.py | {
"start": 552,
"end": 961
} | class ____(object):
def __init__(self, M1, M2):
self.M1 = M1
self.M2 = M2
W = np.random.randn(M1, M2) * np.sqrt(2.0 / M1)
b = np.zeros(M2)
self.W = tf.Variable(W.astype(np.float32))
self.b = tf.Variable(b.astype(np.float32))
self.params = [self.W, self.b]
def forward(self, X):
return tf.nn.relu(tf.matmul(X, self.W) + self.b)
| HiddenLayer |
python | arrow-py__arrow | arrow/locales.py | {
"start": 47573,
"end": 47899
} | class ____(GermanBaseLocale, Locale):
names = ["de-at"]
month_names = [
"",
"Jänner",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
| AustrianLocale |
python | jmcnamara__XlsxWriter | xlsxwriter/exceptions.py | {
"start": 1203,
"end": 1287
} | class ____(XlsxFileError):
"""IO error when creating xlsx file."""
| FileCreateError |
python | numba__numba | numba/core/typeinfer.py | {
"start": 7930,
"end": 8989
} | class ____(object):
def __init__(self, target, items, loc):
self.target = target
self.items = items
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of {container_type} at {loc}",
container_type=self.container_type,
loc=self.loc):
typevars = typeinfer.typevars
tsets = [typevars[i.name].get() for i in self.items]
if not tsets:
typeinfer.add_type(self.target,
self.container_type(types.undefined),
loc=self.loc)
else:
for typs in itertools.product(*tsets):
unified = typeinfer.context.unify_types(*typs)
if unified is not None:
typeinfer.add_type(self.target,
self.container_type(unified),
loc=self.loc)
| _BuildContainerConstraint |
python | doocs__leetcode | solution/0000-0099/0067.Add Binary/Solution.py | {
"start": 0,
"end": 110
} | class ____:
def addBinary(self, a: str, b: str) -> str:
return bin(int(a, 2) + int(b, 2))[2:]
| Solution |
python | ansible__ansible | hacking/create-bulk-issues.py | {
"start": 6551,
"end": 6700
} | class ____(Args):
tests: list[str]
def run(self) -> None:
deprecated_command(self)
@dataclasses.dataclass(frozen=True)
| DeprecationArgs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchLiteral2.py | {
"start": 218,
"end": 264
} | class ____:
tag: Literal["b"]
num: int
| B |
python | django__django | tests/get_or_create/models.py | {
"start": 282,
"end": 390
} | class ____(models.Model):
first_name = models.CharField(max_length=100, default="Anonymous")
| DefaultPerson |
python | walkccc__LeetCode | solutions/1736. Latest Time by Replacing Hidden Digits/1736.py | {
"start": 0,
"end": 342
} | class ____:
def maximumTime(self, time: str) -> str:
ans = list(time)
if time[0] == '?':
ans[0] = '2' if time[1] == '?' or time[1] < '4' else '1'
if time[1] == '?':
ans[1] = '3' if ans[0] == '2' else '9'
if time[3] == '?':
ans[3] = '5'
if time[4] == '?':
ans[4] = '9'
return ''.join(ans)
| Solution |
python | ansible__ansible | test/integration/targets/strategy-external/ansible_collections/ns/col/plugins/strategy/external.py | {
"start": 115,
"end": 161
} | class ____(LinearStrategy):
...
| StrategyModule |
python | sqlalchemy__sqlalchemy | test/orm/dml/test_bulk_statements.py | {
"start": 77797,
"end": 81566
} | class ____(fixtures.DeclarativeMappedTest):
__requires__ = ("insert_returning", "ctes_on_dml")
__sparse_driver_backend__ = True
@classmethod
def setup_classes(cls):
decl_base = cls.DeclarativeBasic
class User(ComparableEntity, decl_base):
__tablename__ = "users"
id: Mapped[uuid.UUID] = mapped_column(primary_key=True)
username: Mapped[str]
@testing.combinations(
("cte_aliased", True),
("cte", False),
argnames="wrap_cte_in_aliased",
id_="ia",
)
@testing.combinations(
("use_union", True),
("no_union", False),
argnames="use_a_union",
id_="ia",
)
@testing.combinations(
"from_statement", "aliased", "direct", argnames="fetch_entity_type"
)
def test_select_from_insert_cte(
self, wrap_cte_in_aliased, use_a_union, fetch_entity_type
):
"""test the use case from #8544; SELECT that selects from a
CTE INSERT...RETURNING.
"""
User = self.classes.User
id_ = uuid.uuid4()
cte = (
insert(User)
.values(id=id_, username="some user")
.returning(User)
.cte()
)
if wrap_cte_in_aliased:
cte = aliased(User, cte)
if use_a_union:
stmt = select(User).where(User.id == id_).union(select(cte))
else:
stmt = select(cte)
if fetch_entity_type == "from_statement":
outer_stmt = select(User).from_statement(stmt)
expect_entity = True
elif fetch_entity_type == "aliased":
outer_stmt = select(aliased(User, stmt.subquery()))
expect_entity = True
elif fetch_entity_type == "direct":
outer_stmt = stmt
expect_entity = not use_a_union and wrap_cte_in_aliased
else:
assert False
sess = fixture_session(bind=self.bind)
with self.sql_execution_asserter() as asserter:
if not expect_entity:
row = sess.execute(outer_stmt).one()
eq_(row, (id_, "some user"))
else:
new_user = sess.scalars(outer_stmt).one()
eq_(new_user, User(id=id_, username="some user"))
cte_sql = (
"(INSERT INTO users (id, username) "
"VALUES (:param_1, :param_2) "
"RETURNING users.id, users.username)"
)
if fetch_entity_type == "aliased" and not use_a_union:
expected = (
f"WITH anon_2 AS {cte_sql} "
"SELECT anon_1.id, anon_1.username "
"FROM (SELECT anon_2.id AS id, anon_2.username AS username "
"FROM anon_2) AS anon_1"
)
elif not use_a_union:
expected = (
f"WITH anon_1 AS {cte_sql} "
"SELECT anon_1.id, anon_1.username FROM anon_1"
)
elif fetch_entity_type == "aliased":
expected = (
f"WITH anon_2 AS {cte_sql} SELECT anon_1.id, anon_1.username "
"FROM (SELECT users.id AS id, users.username AS username "
"FROM users WHERE users.id = :id_1 "
"UNION SELECT anon_2.id AS id, anon_2.username AS username "
"FROM anon_2) AS anon_1"
)
else:
expected = (
f"WITH anon_1 AS {cte_sql} "
"SELECT users.id, users.username FROM users "
"WHERE users.id = :id_1 "
"UNION SELECT anon_1.id, anon_1.username FROM anon_1"
)
asserter.assert_(
CompiledSQL(expected, [{"param_1": id_, "param_2": "some user"}])
)
| CTETest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_redshift_cluster.py | {
"start": 9184,
"end": 11219
} | class ____:
@mock.patch(
"airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.get_cluster_snapshot_status"
)
@mock.patch.object(RedshiftHook, "conn")
def test_delete_cluster_snapshot_wait(self, mock_conn, mock_get_cluster_snapshot_status):
mock_get_cluster_snapshot_status.return_value = None
delete_snapshot = RedshiftDeleteClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
)
delete_snapshot.execute(None)
mock_conn.delete_cluster_snapshot.assert_called_once_with(
SnapshotClusterIdentifier="test_cluster",
SnapshotIdentifier="test_snapshot",
)
mock_get_cluster_snapshot_status.assert_called_once_with(
snapshot_identifier="test_snapshot",
)
@mock.patch(
"airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.get_cluster_snapshot_status"
)
@mock.patch.object(RedshiftHook, "conn")
def test_delete_cluster_snapshot(self, mock_conn, mock_get_cluster_snapshot_status):
delete_snapshot = RedshiftDeleteClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
wait_for_completion=False,
)
delete_snapshot.execute(None)
mock_conn.delete_cluster_snapshot.assert_called_once_with(
SnapshotClusterIdentifier="test_cluster",
SnapshotIdentifier="test_snapshot",
)
mock_get_cluster_snapshot_status.assert_not_called()
def test_template_fields(self):
operator = RedshiftDeleteClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
wait_for_completion=False,
)
validate_template_fields(operator)
| TestRedshiftDeleteClusterSnapshotOperator |
python | great-expectations__great_expectations | great_expectations/core/batch_spec.py | {
"start": 2037,
"end": 2913
} | class ____(SerializableDotDict, BatchSpec, PandasBatchSpecProtocol):
@property
@override
def reader_method(self) -> str:
return self["reader_method"]
@property
@override
def reader_options(self) -> dict:
return self.get("reader_options", {})
@override
def to_json_dict(self) -> dict[str, JSONValues]:
from great_expectations.datasource.fluent.pandas_datasource import (
_EXCLUDE_TYPES_FROM_JSON,
)
json_dict: dict[str, JSONValues] = dict()
json_dict["reader_method"] = self.reader_method
json_dict["reader_options"] = {
reader_option_name: reader_option
for reader_option_name, reader_option in self.reader_options.items()
if not isinstance(reader_option, tuple(_EXCLUDE_TYPES_FROM_JSON))
}
return json_dict
| PandasBatchSpec |
python | django__django | django/db/models/lookups.py | {
"start": 28166,
"end": 28249
} | class ____(UUIDTextMixin, EndsWith):
pass
@UUIDField.register_lookup
| UUIDEndsWith |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_pool.py | {
"start": 218,
"end": 1367
} | class ____(fixtures.TestBase, AssertsExecutionResults):
__requires__ = ("cpython", "python_profiling_backend")
class Connection:
def rollback(self):
pass
def close(self):
pass
def setup_test(self):
# create a throwaway pool which
# has the effect of initializing
# class-level event listeners on Pool,
# if not present already.
p1 = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)
p1.connect()
global pool
pool = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)
# make this a real world case where we have a "connect" handler
@event.listens_for(pool, "connect")
def do_connect(dbapi_conn, conn_record):
pass
@profiling.function_call_count(variance=0.10)
def test_first_connect(self):
pool.connect()
def test_second_connect(self):
conn = pool.connect()
conn.close()
@profiling.function_call_count(variance=0.10)
def go():
conn2 = pool.connect()
return conn2
go()
| QueuePoolTest |
python | Pylons__pyramid | tests/test_wsgi.py | {
"start": 18,
"end": 748
} | class ____(unittest.TestCase):
def _callFUT(self, app):
from pyramid.wsgi import wsgiapp
return wsgiapp(app)
def test_wsgiapp_none(self):
self.assertRaises(ValueError, self._callFUT, None)
def test_decorator(self):
context = DummyContext()
request = DummyRequest()
decorator = self._callFUT(dummyapp)
response = decorator(context, request)
self.assertEqual(response, dummyapp)
def test_decorator_object_instance(self):
context = DummyContext()
request = DummyRequest()
app = DummyApp()
decorator = self._callFUT(app)
response = decorator(context, request)
self.assertEqual(response, app)
| WSGIAppTests |
python | django__django | tests/check_framework/template_test_apps/same_tags_app_1/apps.py | {
"start": 36,
"end": 140
} | class ____(AppConfig):
name = "check_framework.template_test_apps.same_tags_app_1"
| SameTagsApp1AppConfig |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/mlengine.py | {
"start": 1504,
"end": 1705
} | class ____(BaseGoogleLink):
"""Helper class for constructing ML Engine link."""
name = "MLEngine Model"
key = "ml_engine_model"
format_str = MLENGINE_MODEL_DETAILS_LINK
| MLEngineModelLink |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 324,
"end": 11639
} | class ____(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
self.result = sqrt(28.0 / 8)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, 1], [2, 3]],
[[3, 2], [1, 0]]]
if isinstance(self.result, int):
self.assertEqual(norm(tensor), self.result)
else:
self.assertAlmostEqual(norm(tensor), self.result, 6)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, "one"], [2, 3]],
[[3, "two"], [1, 0]]]
self.assertRaises(BadListError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[0, 1, 2, 3],
[3, 2, 1, 0]]
self.assertRaises(TypeError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0, 1, 0], [2, 3, 2]],
[[3, 2, 3], [1, 0, 1]]]
self.assertRaises(TypeError, norm, tensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]
self.assertEqual(max(tensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1, "two"], [3, 4]],
[[5, "six"], [7, 8]]]
self.assertRaises(BadListError, max, tensor)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, 1, 2, 3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[[9, 8], [7, 6]],
[[5, 4], [3, 2]]]
self.assertEqual(min(tensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[["nine", 8], [7, 6]],
[["five", 4], [3, 2]]]
self.assertRaises(BadListError, min, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1, 3], [5, 7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], self.typeCode)
scale(tensor, 4)
self.assertEqual((tensor == [[[4, 0, 4], [0, 4, 0], [4, 0, 4]],
[[0, 4, 0], [4, 0, 4], [0, 4, 0]],
[[4, 0, 4], [0, 4, 0], [4, 0, 4]]]).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c')
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1],
[0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode)
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1]],
[[1, 0], [0, 1], [1, 0]]], self.typeCode)
self.assertRaises(TypeError, scale, tensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]], self.typeCode)
floor(tensor, 4)
np.testing.assert_array_equal(tensor, np.array([[[4, 4], [4, 4]],
[[5, 6], [7, 8]]]))
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]], 'c')
self.assertRaises(TypeError, floor, tensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], self.typeCode)
self.assertRaises(TypeError, floor, tensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeil(self):
"Test ceil function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9, 8], [7, 6]],
[[5, 4], [3, 2]]], self.typeCode)
ceil(tensor, 5)
np.testing.assert_array_equal(tensor, np.array([[[5, 5], [5, 5]],
[[5, 4], [3, 2]]]))
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongType(self):
"Test ceil function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9, 8], [7, 6]],
[[5, 4], [3, 2]]], 'c')
self.assertRaises(TypeError, ceil, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[9, 8], [7, 6], [5, 4], [3, 2]], self.typeCode)
self.assertRaises(TypeError, ceil, tensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = [[[9, 8], [7, 6]],
[[5, 4], [3, 2]]]
self.assertRaises(TypeError, ceil, tensor)
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Tensor.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[[1, 1], [1, 1]],
[[1, 1], [1, 1]]])
self.assertEqual((lower == [[[1, 1], [1, 0]],
[[1, 0], [0, 0]]]).all(), True)
self.assertEqual((upper == [[[0, 0], [0, 1]],
[[0, 1], [1, 1]]]).all(), True)
######################################################################
| TensorTestCase |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 9297,
"end": 10452
} | class ____:
Encoding = _VectorIndexMultivectorEncoding
@deprecated(
'Using the "encoding" argument is deprecated. Instead, specify it at the top-level when creating your `vector_config`'
)
@overload
@staticmethod
def multi_vector(
encoding: _MultiVectorEncodingConfigCreate,
aggregation: Optional[MultiVectorAggregation] = None,
) -> _MultiVectorConfigCreate: ...
@overload
@staticmethod
def multi_vector(
encoding: Optional[_MultiVectorEncodingConfigCreate] = None,
aggregation: Optional[MultiVectorAggregation] = None,
) -> _MultiVectorConfigCreate: ...
@staticmethod
def multi_vector(
encoding: Optional[_MultiVectorEncodingConfigCreate] = None,
aggregation: Optional[MultiVectorAggregation] = None,
) -> _MultiVectorConfigCreate:
if encoding is not None:
_Warnings.encoding_in_multi_vector_config()
return _MultiVectorConfigCreate(
encoding=encoding if encoding is not None else None,
aggregation=aggregation.value if aggregation is not None else None,
)
| _VectorIndexMultiVector |
python | getsentry__sentry-python | sentry_sdk/integrations/pydantic_ai/__init__.py | {
"start": 308,
"end": 1232
} | class ____(Integration):
identifier = "pydantic_ai"
origin = f"auto.ai.{identifier}"
def __init__(self, include_prompts=True):
# type: (bool) -> None
"""
Initialize the Pydantic AI integration.
Args:
include_prompts: Whether to include prompts and messages in span data.
Requires send_default_pii=True. Defaults to True.
"""
self.include_prompts = include_prompts
@staticmethod
def setup_once():
# type: () -> None
"""
Set up the pydantic-ai integration.
This patches the key methods in pydantic-ai to create Sentry spans for:
- Agent invocations (Agent.run methods)
- Model requests (AI client calls)
- Tool executions
"""
_patch_agent_run()
_patch_graph_nodes()
_patch_model_request()
_patch_tool_execution()
| PydanticAIIntegration |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_sso_link_email.py | {
"start": 889,
"end": 1307
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
context = get_context(request)
context["has_password"] = True
return MailPreview(
text_template="sentry/emails/auth-sso-disabled.txt",
html_template="sentry/emails/auth-sso-disabled.html",
context=context,
).render(request)
@internal_region_silo_view
| DebugSsoUnlinkedEmailView |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 13444,
"end": 19811
} | class ____(fixtures.MappedTest, AssertsCompiledSQL):
"""Tests the ultimate join condition, a single column
that points to itself, e.g. within a SQL function or similar.
The test is against a materialized path setup.
this is an **extremely** unusual case:
.. sourcecode:: text
Entity
------
path -------+
^ |
+---------+
In this case, one-to-many and many-to-one are no longer accurate.
Both relationships return collections. I'm not sure if this is a good
idea.
"""
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"entity", metadata, Column("path", String(100), primary_key=True)
)
@classmethod
def setup_classes(cls):
class Entity(cls.Basic):
def __init__(self, path):
self.path = path
def _descendants_fixture(self, data=True):
Entity = self.classes.Entity
entity = self.tables.entity
m = self.mapper_registry.map_imperatively(
Entity,
entity,
properties={
"descendants": relationship(
Entity,
primaryjoin=remote(foreign(entity.c.path)).like(
entity.c.path.concat("/%")
),
viewonly=True,
order_by=entity.c.path,
)
},
)
configure_mappers()
assert m.get_property("descendants").direction is ONETOMANY
if data:
return self._fixture()
def _anscestors_fixture(self, data=True):
Entity = self.classes.Entity
entity = self.tables.entity
m = self.mapper_registry.map_imperatively(
Entity,
entity,
properties={
"anscestors": relationship(
Entity,
primaryjoin=entity.c.path.like(
remote(foreign(entity.c.path)).concat("/%")
),
viewonly=True,
order_by=entity.c.path,
)
},
)
configure_mappers()
assert m.get_property("anscestors").direction is ONETOMANY
if data:
return self._fixture()
def _fixture(self):
Entity = self.classes.Entity
sess = fixture_session()
sess.add_all(
[
Entity("/foo"),
Entity("/foo/bar1"),
Entity("/foo/bar2"),
Entity("/foo/bar2/bat1"),
Entity("/foo/bar2/bat2"),
Entity("/foo/bar3"),
Entity("/bar"),
Entity("/bar/bat1"),
]
)
return sess
def test_descendants_lazyload_clause(self):
self._descendants_fixture(data=False)
Entity = self.classes.Entity
self.assert_compile(
Entity.descendants.property.strategy._lazywhere,
"entity.path LIKE (:param_1 || :path_1)",
)
self.assert_compile(
Entity.descendants.property.strategy._rev_lazywhere,
":param_1 LIKE (entity.path || :path_1)",
)
def test_ancestors_lazyload_clause(self):
self._anscestors_fixture(data=False)
Entity = self.classes.Entity
# :param_1 LIKE (:param_1 || :path_1)
self.assert_compile(
Entity.anscestors.property.strategy._lazywhere,
":param_1 LIKE (entity.path || :path_1)",
)
self.assert_compile(
Entity.anscestors.property.strategy._rev_lazywhere,
"entity.path LIKE (:param_1 || :path_1)",
)
def test_descendants_lazyload(self):
sess = self._descendants_fixture()
Entity = self.classes.Entity
e1 = sess.query(Entity).filter_by(path="/foo").first()
eq_(
[e.path for e in e1.descendants],
[
"/foo/bar1",
"/foo/bar2",
"/foo/bar2/bat1",
"/foo/bar2/bat2",
"/foo/bar3",
],
)
def test_anscestors_lazyload(self):
sess = self._anscestors_fixture()
Entity = self.classes.Entity
e1 = sess.query(Entity).filter_by(path="/foo/bar2/bat1").first()
eq_([e.path for e in e1.anscestors], ["/foo", "/foo/bar2"])
def test_descendants_joinedload(self):
sess = self._descendants_fixture()
Entity = self.classes.Entity
e1 = (
sess.query(Entity)
.filter_by(path="/foo")
.options(joinedload(Entity.descendants))
.first()
)
eq_(
[e.path for e in e1.descendants],
[
"/foo/bar1",
"/foo/bar2",
"/foo/bar2/bat1",
"/foo/bar2/bat2",
"/foo/bar3",
],
)
def test_descendants_subqueryload(self):
sess = self._descendants_fixture()
Entity = self.classes.Entity
e1 = (
sess.query(Entity)
.filter_by(path="/foo")
.options(subqueryload(Entity.descendants))
.first()
)
eq_(
[e.path for e in e1.descendants],
[
"/foo/bar1",
"/foo/bar2",
"/foo/bar2/bat1",
"/foo/bar2/bat2",
"/foo/bar3",
],
)
def test_anscestors_joinedload(self):
sess = self._anscestors_fixture()
Entity = self.classes.Entity
e1 = (
sess.query(Entity)
.filter_by(path="/foo/bar2/bat1")
.options(joinedload(Entity.anscestors))
.first()
)
eq_([e.path for e in e1.anscestors], ["/foo", "/foo/bar2"])
def test_plain_join_descendants(self):
self._descendants_fixture(data=False)
Entity = self.classes.Entity
sess = fixture_session()
da = aliased(Entity)
self.assert_compile(
sess.query(Entity).join(Entity.descendants.of_type(da)),
"SELECT entity.path AS entity_path FROM entity JOIN entity AS "
"entity_1 ON entity_1.path LIKE (entity.path || :path_1)",
)
| DirectSelfRefFKTest |
python | keras-team__keras | keras/src/distillation/distillation_loss_test.py | {
"start": 328,
"end": 1248
} | class ____(TestCase):
"""Test cases for LogitsDistillation distillation_loss."""
def test_logits_distillation_basic(self):
"""Test basic logits distillation structure validation."""
# Create dummy logits
teacher_logits = keras.ops.convert_to_tensor(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype="float32"
)
student_logits = keras.ops.convert_to_tensor(
np.array([[2.0, 1.0, 4.0], [3.0, 6.0, 2.0]]), dtype="float32"
)
distillation_loss = LogitsDistillation(temperature=3.0)
distillation_loss.validate_outputs(teacher_logits, student_logits)
incompatible_logits = {"output": teacher_logits}
with self.assertRaises(ValueError):
distillation_loss.validate_outputs(
teacher_logits, incompatible_logits
)
@pytest.mark.requires_trainable_backend
| TestLogitsDistillation |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/diamond_link_bottom/package.py | {
"start": 217,
"end": 479
} | class ____(Package):
"""Part of diamond-link-{top,left,right,bottom} group"""
homepage = "http://www.example.com"
url = "http://www.example.com/diamond-link-bottom-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
| DiamondLinkBottom |
python | kamyu104__LeetCode-Solutions | Python/longest-strictly-increasing-or-strictly-decreasing-subarray.py | {
"start": 524,
"end": 958
} | class ____(object):
def longestMonotonicSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = cnt1 = cnt2 = 1
for i in xrange(1, len(nums)):
cnt1 = cnt1+1 if nums[i-1] < nums[i] else 1
cnt2 = cnt2+1 if nums[i-1] > nums[i] else 1
result = max(result, cnt1, cnt2)
return result
# Time: O(n)
# Space: O(1)
# array
| Solution2 |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 27581,
"end": 31415
} | class ____(BaseRealtimeConnectionResource):
def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
"""Send this event when you want to remove any item from the conversation
history.
The server will respond with a `conversation.item.deleted` event,
unless the item does not exist in the conversation history, in which case the
server will respond with an error.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
)
)
def create(
self,
*,
item: ConversationItemParam,
event_id: str | NotGiven = NOT_GIVEN,
previous_item_id: str | NotGiven = NOT_GIVEN,
) -> None:
"""
Add a new Item to the Conversation's context, including messages, function
calls, and function call responses. This event can be used both to populate a
"history" of the conversation and to add new items mid-stream, but has the
current limitation that it cannot populate assistant audio messages.
If successful, the server will respond with a `conversation.item.created`
event, otherwise an `error` event will be sent.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given(
{
"type": "conversation.item.create",
"item": item,
"event_id": event_id,
"previous_item_id": previous_item_id,
}
),
)
)
def truncate(
self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
) -> None:
"""Send this event to truncate a previous assistant message’s audio.
The server
will produce audio faster than realtime, so this event is useful when the user
interrupts to truncate audio that has already been sent to the client but not
yet played. This will synchronize the server's understanding of the audio with
the client's playback.
Truncating audio will delete the server-side text transcript to ensure there
is not text in the context that hasn't been heard by the user.
If successful, the server will respond with a `conversation.item.truncated`
event.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given(
{
"type": "conversation.item.truncate",
"audio_end_ms": audio_end_ms,
"content_index": content_index,
"item_id": item_id,
"event_id": event_id,
}
),
)
)
def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
"""
Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
The server will respond with a `conversation.item.retrieved` event,
unless the item does not exist in the conversation history, in which case the
server will respond with an error.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}),
)
)
| RealtimeConversationItemResource |
python | numpy__numpy | numpy/lib/tests/test_nanfunctions.py | {
"start": 3250,
"end": 9433
} | class ____:
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip("`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
match = "All-NaN slice encountered"
for func in self.nanfuncs:
with pytest.warns(RuntimeWarning, match=match):
out = func(array, axis=axis)
assert np.isnan(out).all()
assert out.dtype == array.dtype
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_subclass(self):
class MyNDArray(np.ndarray):
pass
# Check that it works and that type and
# shape are preserved
mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine)
assert_(res.shape == ())
# check that rows of nan are dealt with for subclasses (#4628)
mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(np.isnan(res[1]) and not np.isnan(res[0])
and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine)
assert_(res.shape == ())
assert_(res != np.nan)
assert_(len(w) == 0)
def test_object_array(self):
arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
assert_equal(np.nanmin(arr), 1.0)
assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# assert_equal does not work on object arrays of nan
assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_initial(self, dtype):
class MyNDArray(np.ndarray):
pass
ar = np.arange(9).astype(dtype)
ar[:5] = np.nan
for f in self.nanfuncs:
initial = 100 if f is np.nanmax else 0
ret1 = f(ar, initial=initial)
assert ret1.dtype == dtype
assert ret1 == initial
ret2 = f(ar.view(MyNDArray), initial=initial)
assert ret2.dtype == dtype
assert ret2 == initial
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_where(self, dtype):
class MyNDArray(np.ndarray):
pass
ar = np.arange(9).reshape(3, 3).astype(dtype)
ar[0, :] = np.nan
where = np.ones_like(ar, dtype=np.bool)
where[:, 0] = False
for f in self.nanfuncs:
reference = 4 if f is np.nanmin else 8
ret1 = f(ar, where=where, initial=5)
assert ret1.dtype == dtype
assert ret1 == reference
ret2 = f(ar.view(MyNDArray), where=where, initial=5)
assert ret2.dtype == dtype
assert ret2 == reference
| TestNanFunctions_MinMax |
python | PrefectHQ__prefect | src/prefect/settings/models/client.py | {
"start": 1230,
"end": 3609
} | class ____(PrefectBaseSettings):
"""
Settings for controlling API client behavior
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(("client",))
max_retries: int = Field(
default=5,
ge=0,
description="""
The maximum number of retries to perform on failed HTTP requests.
Defaults to 5. Set to 0 to disable retries.
See `PREFECT_CLIENT_RETRY_EXTRA_CODES` for details on which HTTP status codes are
retried.
""",
)
retry_jitter_factor: float = Field(
default=0.2,
ge=0.0,
description="""
A value greater than or equal to zero to control the amount of jitter added to retried
client requests. Higher values introduce larger amounts of jitter.
Set to 0 to disable jitter. See `clamped_poisson_interval` for details on the how jitter
can affect retry lengths.
""",
)
retry_extra_codes: ClientRetryExtraCodes = Field(
default_factory=set,
description="""
A list of extra HTTP status codes to retry on. Defaults to an empty list.
429, 502 and 503 are always retried. Please note that not all routes are idempotent and retrying
may result in unexpected behavior.
""",
examples=["404,429,503", "429", {404, 429, 503}],
)
csrf_support_enabled: bool = Field(
default=True,
description="""
Determines if CSRF token handling is active in the Prefect client for API
requests.
When enabled (`True`), the client automatically manages CSRF tokens by
retrieving, storing, and including them in applicable state-changing requests
""",
)
custom_headers: JsonStringOrDict = Field(
default_factory=dict,
description="""
Custom HTTP headers to include with every API request to the Prefect server.
Headers are specified as key-value pairs. Note that headers like 'User-Agent'
and CSRF-related headers are managed by Prefect and cannot be overridden.
""",
examples=[{"X-Custom-Header": "value"}, {"Authorization": "Bearer token"}],
)
metrics: ClientMetricsSettings = Field(
default_factory=ClientMetricsSettings,
description="Settings for controlling metrics reporting from the client",
)
| ClientSettings |
python | davidhalter__jedi | test/completion/usages.py | {
"start": 3101,
"end": 3400
} | class ____():
def a(self):
#< 13 (4,13), (0,13)
self._instance_var = 3
def b(self):
#< (-4,13), (0,13)
self._instance_var
# A call to self used to trigger an error, because it's also a trailer
# with two children.
self()
| TestInstanceVar |
python | run-llama__llama_index | llama-index-core/llama_index/core/chat_engine/condense_question.py | {
"start": 1458,
"end": 14138
} | class ____(BaseChatEngine):
"""
Condense Question Chat Engine.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
condense_question_prompt: BasePromptTemplate,
memory: BaseMemory,
llm: LLM,
verbose: bool = False,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._query_engine = query_engine
self._condense_question_prompt = condense_question_prompt
self._memory = memory
self._llm = llm
self._verbose = verbose
self.callback_manager = callback_manager or CallbackManager([])
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
condense_question_prompt: Optional[BasePromptTemplate] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> "CondenseQuestionChatEngine":
"""Initialize a CondenseQuestionChatEngine from default parameters."""
condense_question_prompt = condense_question_prompt or DEFAULT_PROMPT
llm = llm or Settings.llm
chat_history = chat_history or []
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if system_prompt is not None:
raise NotImplementedError(
"system_prompt is not supported for CondenseQuestionChatEngine."
)
if prefix_messages is not None:
raise NotImplementedError(
"prefix_messages is not supported for CondenseQuestionChatEngine."
)
return cls(
query_engine,
condense_question_prompt,
memory,
llm,
verbose=verbose,
callback_manager=Settings.callback_manager,
)
def _condense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
if not chat_history:
# Keep the question as is if there's no conversation context.
return last_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return self._llm.predict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
async def _acondense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
if not chat_history:
# Keep the question as is if there's no conversation context.
return last_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return await self._llm.apredict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
def _get_tool_output_from_response(
self, query: str, response: RESPONSE_TYPE
) -> ToolOutput:
if isinstance(response, (StreamingResponse, AsyncStreamingResponse)):
return ToolOutput(
content="",
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
else:
return ToolOutput(
content=str(response),
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
@trace_method("chat")
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or self._memory.get(input=message)
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
self._memory.put(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or self._memory.get(input=message)
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if (
isinstance(query_response, StreamingResponse)
and query_response.response_gen is not None
):
# override the generator to include writing to chat history
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
chat_stream=response_gen_from_query_engine(query_response.response_gen),
sources=[tool_output],
)
thread = Thread(
target=response.write_response_to_history,
args=(self._memory,),
)
thread.start()
else:
raise ValueError("Streaming is not enabled. Please use chat() instead.")
return response
@trace_method("chat")
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or await self._memory.aget(input=message)
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
await self._memory.aput(ChatMessage(role=MessageRole.USER, content=message))
await self._memory.aput(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or await self._memory.aget(input=message)
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if isinstance(query_response, AsyncStreamingResponse):
# override the generator to include writing to chat history
# TODO: query engine does not support async generator yet
await self._memory.aput(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
achat_stream=aresponse_gen_from_query_engine(
query_response.async_response_gen()
),
sources=[tool_output],
)
response.awrite_response_to_history_task = asyncio.create_task(
response.awrite_response_to_history(self._memory)
)
else:
raise ValueError("Streaming is not enabled. Please use achat() instead.")
return response
def reset(self) -> None:
# Clear chat history
self._memory.reset()
@property
def chat_history(self) -> List[ChatMessage]:
"""Get chat history."""
return self._memory.get_all()
| CondenseQuestionChatEngine |
python | pytorch__pytorch | test/test_dynamic_shapes.py | {
"start": 74835,
"end": 109734
} | class ____(TestCase):
@skipIfTorchDynamo("mark_dynamic not supported")
def test_simplify_max_1_0(self):
x = torch.rand(10)
torch._dynamo.mark_dynamic(x, 0, max=20, min=5)
@torch.compile(fullgraph=True)
def func(x, v):
# test that statically_known_true
if (v == 0 or v == 1) and not statically_known_true(
max(v, (-1 + x.size()[0] // 2)) == (-1 + x.size()[0] // 2)
):
raise AssertionError("error")
if max(v, (-1 + x.size()[0] // 2)) == (-1 + x.size()[0] // 2):
return x * 400
else:
return (x * 10) * 100
# testing that this does not throw constraint violation error.
self.assertEqual(func(x, 1), x * 400)
self.assertEqual(func(x, 0), x * 400)
def test_dim_constraints_reduce_congruences_simple(self):
from sympy import Symbol
s = Symbol("s", positive=True, integer=True)
dim_constraints = DimConstraints({}, {}, set(), {})
dim_constraints._congruences[s] = {
(s / 2) % 2,
(s / 2) % 8,
(s / 2) % 4,
s % 2,
((s / 16) + 2) % 4,
}
congruences = dim_constraints._reduce_congruences()
self.assertEqual(congruences[s], {(s + 32) % 64})
def test_dim_constraints_reduce_inequalities_simple(self):
from sympy import Eq, Interval, Ne, Symbol
from sympy.solvers.inequalities import reduce_inequalities
s = Symbol("s", positive=True, integer=True)
exprs = {
s >= 2,
Ne(8 * s, 16),
Ne(s / 2, 1),
Ne(16 * s, 32),
s < 16,
Ne(s, 2),
s / 2 < 16,
s / 2 > 1,
s / 2 >= 2,
Ne(3 * s / 2, 3),
}
solution = reduce_inequalities(exprs, s).as_set()
self.assertEqual(solution, Interval.Ropen(4, 16))
exprs.add(Eq(s / 2, 4))
solution = reduce_inequalities(exprs, s).as_set()
self.assertEqual(solution, {8})
def test_dim_constraints_reduce_inequalities_error(self):
from collections import defaultdict
from sympy import Symbol
from sympy.solvers.inequalities import reduce_inequalities
from torch._dynamo.source import (
LocalSource,
TensorProperty,
TensorPropertySource,
)
from torch.fx.experimental.symbolic_shapes import DynamicDimConstraintPrinter
s0 = Symbol("s0", positive=True, integer=True)
exprs = {
4 * s0**3 - 4 * s0**2 + s0 <= 2147483647,
s0 >= 2,
s0**3 <= 2147483647,
s0 <= 2147483647,
}
answer = reduce_inequalities(exprs, s0)
symbol_to_source = defaultdict(list)
symbol_to_source[s0].append(
TensorPropertySource(
base=LocalSource(local_name="a"), prop=TensorProperty.SIZE, idx=0
)
)
dcp = DynamicDimConstraintPrinter(symbol_to_source, {})
with self.assertRaisesRegex(
AssertionError,
"Unknown symbol.*created by constraints solver",
):
dcp.doprint(answer)
def test_dim_constraints_solve_full(self):
from sympy import Eq, Integer, Ne, Symbol
from torch._dynamo.source import (
LocalSource,
TensorProperty,
TensorPropertySource,
)
src0 = TensorPropertySource(
base=LocalSource(local_name="a"), prop=TensorProperty.SIZE, idx=0
)
src2 = TensorPropertySource(
base=LocalSource(local_name="b"), prop=TensorProperty.SIZE, idx=0
)
src3 = TensorPropertySource(
base=LocalSource(local_name="c"), prop=TensorProperty.SIZE, idx=0
)
src4 = TensorPropertySource(
base=LocalSource(local_name="d"), prop=TensorProperty.SIZE, idx=0
)
src1 = TensorPropertySource(
base=LocalSource(local_name="a"), prop=TensorProperty.SIZE, idx=2
)
src7 = TensorPropertySource(
base=LocalSource(local_name="a"), prop=TensorProperty.SIZE, idx=3
)
src5 = TensorPropertySource(
base=LocalSource(local_name="a"), prop=TensorProperty.SIZE, idx=1
)
src8 = TensorPropertySource(
base=LocalSource(local_name="b"), prop=TensorProperty.SIZE, idx=1
)
src6 = TensorPropertySource(
base=LocalSource(local_name="c"), prop=TensorProperty.SIZE, idx=1
)
src9 = TensorPropertySource(
base=LocalSource(local_name="d"), prop=TensorProperty.SIZE, idx=1
)
src10 = TensorPropertySource(
base=LocalSource(local_name="e"), prop=TensorProperty.SIZE, idx=1
)
src11 = TensorPropertySource(
base=LocalSource(local_name="f"), prop=TensorProperty.SIZE, idx=1
)
src12 = TensorPropertySource(
base=LocalSource(local_name="b"), prop=TensorProperty.SIZE, idx=2
)
s0 = Symbol("s0", positive=True, integer=True)
s1 = Symbol("s1", positive=True, integer=True)
s5 = Symbol("s5", positive=True, integer=True)
s6 = Symbol("s6", positive=True, integer=True)
symbol_to_source = {
s0: [src0, src2, src3, src4],
s1: [src1, src7],
s5: [src5, src8],
s6: [src6, src9, src10],
}
var_to_val = {s0: 8, s1: 96, s5: 22, s6: 21}
marked_dynamic = {s0, s1, s5, s6}
dim_constraints = DimConstraints(
symbol_to_source, var_to_val, marked_dynamic, {}
)
dim_constraints.add_equality(src2, s0)
dim_constraints.add_equality(src3, s0)
dim_constraints.add_equality(src4, s0)
dim_constraints.add_equality(src7, s1)
dim_constraints.add_equality(src8, s5)
dim_constraints.add_equality(src9, s6)
dim_constraints.add_equality(src10, s6)
dim_constraints.add_equality(src11, Integer(1))
dim_constraints.add_equality(src12, Integer(3))
dim_constraints.add(s1**2 <= 2147483647)
dim_constraints.add(32 * s1**2 <= 2147483647)
dim_constraints.add(s0 < 16)
dim_constraints.add(Eq(Mod(s1, 2), 0))
dim_constraints.add(Ne(FloorDiv(s1, 2), 1))
dim_constraints.add(Ne((FloorDiv(s1, 2)) ** 2, 1))
dim_constraints.add(32 * (FloorDiv(s1, 2)) ** 2 <= 2147483647)
dim_constraints.add((FloorDiv(s1, 2)) ** 2 > 1)
dim_constraints.add(Ne(FloorDiv(s1, 2), 1))
dim_constraints.add(
64 * (FloorDiv((FloorDiv(s1, 2) - 1), 2)) ** 2
+ 128 * (FloorDiv((FloorDiv(s1, 2) - 1), 2))
+ 64
<= 2147483647
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 2) + 1, 1))
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 2)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 2))
+ 1,
1,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 2) + 1, 1))
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 2)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 2))
+ 1
> 1
)
dim_constraints.add(
128 * (FloorDiv((FloorDiv(s1, 2) - 1), 4)) ** 2
+ 256 * (FloorDiv((FloorDiv(s1, 2) - 1), 4))
+ 128
<= 2147483647
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 4) + 1, 1))
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 4)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 4))
+ 1,
1,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 4) + 1, 1))
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 4)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 4))
+ 1
> 1
)
dim_constraints.add(
256 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
+ 512 * (FloorDiv((FloorDiv(s1, 2) - 1), 8))
+ 256
<= 2147483647
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) + 1, 1))
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 8))
+ 1,
1,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) + 1, 1))
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
+ 2 * (FloorDiv((FloorDiv(s1, 2) - 1), 8))
+ 1
> 1
)
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) + 1 >= 3)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
<= 2147483647
)
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1 >= 0)
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1 >= 1)
dim_constraints.add(
Ne(
60 * s0 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * s0 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * s0,
0,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1, 1))
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
1,
)
)
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
0,
)
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1
>= 0
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1, 0))
dim_constraints.add(
1
< 60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1, -1))
dim_constraints.add(
Ne(
60 * s0 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * s0 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * s0,
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120,
)
)
dim_constraints.add(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
> 0
)
dim_constraints.add(
Eq(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2 * (Mod(s0, 2))
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8) * Mod(s0, 2)
+ 60 * (Mod(s0, 2)),
0,
)
)
dim_constraints.add(
Ne(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120,
0,
)
)
dim_constraints.add(
Ne(
60
* (FloorDiv(s0, 2))
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120
* FloorDiv(s0, 2)
* FloorDiv(s0, (FloorDiv(s0, 2)))
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2)) * (FloorDiv(s0, (FloorDiv(s0, 2)))),
0,
)
)
dim_constraints.add(Ne(FloorDiv(s0, 2), 1))
dim_constraints.add(
Ne(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60,
0,
)
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
>= 0
)
dim_constraints.add(
1
< 60
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, (FloorDiv(s0, 2))) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, (FloorDiv(s0, 2))))
)
dim_constraints.add(Ne(16 * s0, 32))
dim_constraints.add(Eq(16 * (Mod(s0, 2)), 0))
dim_constraints.add(Ne(16 * s0, 32))
dim_constraints.add(Eq(16 * (Mod(s0, 2)), 0))
dim_constraints.add(FloorDiv(s0, 2) >= 2)
dim_constraints.add(Ne(FloorDiv(s0, 2), 1))
dim_constraints.add(1 < FloorDiv(s0, 2))
dim_constraints.add(Ne(s0, 2))
dim_constraints.add(
60
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, (FloorDiv(s0, 2))) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, (FloorDiv(s0, 2))))
>= 60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
)
dim_constraints.add(
60
* (FloorDiv(s0, 2))
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120
* FloorDiv(s0, 2)
* FloorDiv(s0, (FloorDiv(s0, 2)))
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2)) * (FloorDiv(s0, (FloorDiv(s0, 2))))
> 0
)
dim_constraints.add(
Ne(
60
* (FloorDiv(s0, 2))
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120
* FloorDiv(s0, 2)
* FloorDiv(s0, (FloorDiv(s0, 2)))
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2)) * (FloorDiv(s0, (FloorDiv(s0, 2)))),
3 * (FloorDiv(s0, 2)) * (FloorDiv(s0, (FloorDiv(s0, 2)))),
)
)
dim_constraints.add(
Ne(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20,
0,
)
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
>= 0
)
dim_constraints.add(
Ne(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20,
20,
)
)
dim_constraints.add(
Ne(
20
* (
Mod(
1,
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
)
),
0,
)
)
dim_constraints.add(
Ne(
20
* (FloorDiv((FloorDiv(s1, 2) - 1), 8))
* (
Mod(
1,
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
- 2
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
+ 1 / (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1),
)
)
- 20
* Mod(
1,
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
- 2
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
+ 1 / (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1),
),
0,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1, 1))
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1
>= 1
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
>= 0
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
>= 1
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
>= 2
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
> 1
)
dim_constraints.add(
20 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 40 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 20
< 60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
)
dim_constraints.add(
Ne(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60,
60,
)
)
dim_constraints.add(
Ne(
FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1,
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
)
)
dim_constraints.add(
Eq(
(FloorDiv((FloorDiv(s1, 2) - 1), 8))
* (
Mod(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
- 2
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
+ 1 / (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1),
1,
)
)
- Mod(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
- 2
* FloorDiv((FloorDiv(s1, 2) - 1), 8)
/ (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1)
+ 1 / (FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1),
1,
),
0,
)
)
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1,
)
)
dim_constraints.add(Ne(8 * s0, 16))
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
>= (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1
)
dim_constraints.add(
60
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, (FloorDiv(s0, 2))) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, (FloorDiv(s0, 2))))
<= 2147483647
)
dim_constraints.add(
90 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90
<= 2147483647
)
dim_constraints.add(FloorDiv(s0, 2) < 16)
dim_constraints.add(FloorDiv(s0, 2) > 1)
dim_constraints.add(
Ne(
90 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
1
< 90 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 2 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1
> 1
)
dim_constraints.add(
60
* (FloorDiv(s0, (FloorDiv(s0, 2))))
* (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, (FloorDiv(s0, 2))) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, (FloorDiv(s0, 2))))
> 1
)
dim_constraints.add(
Ne(
60 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
90 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90
> 1
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
> 1
)
dim_constraints.add(
Ne(
60 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2)),
3 * (FloorDiv(s0, 2)),
)
)
dim_constraints.add(
60 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60 * (FloorDiv(s0, 2))
> 0
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
> 0
)
dim_constraints.add(
Ne(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120,
0,
)
)
dim_constraints.add(
1
< 120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
)
dim_constraints.add(
Ne(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120,
6,
)
)
dim_constraints.add(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
> 0
)
dim_constraints.add(
Ne(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120,
0,
)
)
dim_constraints.add(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
<= 2147483647
)
dim_constraints.add(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
<= 20480
)
dim_constraints.add(
Ne(
90 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90,
0,
)
)
dim_constraints.add(
120 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 240 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 120
> 1
)
dim_constraints.add(
90 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 180 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 90
<= 20480
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 120 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 60
<= 20480
)
dim_constraints.add(
Ne(
240 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 480 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 240,
0,
)
)
dim_constraints.add(Eq(6 * s5, 132))
dim_constraints.add(Eq(4, FloorDiv(s0, 2)))
dim_constraints.add(Eq(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1, 4))
dim_constraints.add(
Ne(
64 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 128 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 64 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
1
< 64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 128 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 64
)
dim_constraints.add(
64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 128 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 64
<= 2147483647
)
dim_constraints.add(
64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 128 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 64
> 1
)
dim_constraints.add(
62 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 124 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 62
<= 2147483647
)
dim_constraints.add(
Ne(
62 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 124 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 62 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
1
< 62 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 124 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 62
)
dim_constraints.add(Ne(3 * (FloorDiv(s0, 2)), 3))
dim_constraints.add(Ne(3 * (FloorDiv(s0, 2)), 3))
dim_constraints.add(Eq(FloorDiv(s0, 2), 4))
dim_constraints.add(Eq(4, FloorDiv(s0, 2)))
dim_constraints.add(Eq(FloorDiv(s0, 2), 4))
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 1 >= 3)
dim_constraints.add(
64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576
<= 2147483647
)
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 3 >= 0)
dim_constraints.add(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 3 >= 1)
dim_constraints.add(
Ne(
64 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 3, 1))
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9,
1,
)
)
dim_constraints.add(
Ne(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9,
0,
)
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9
>= 0
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 3, 0))
dim_constraints.add(
1
< 64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576
)
dim_constraints.add(Ne(FloorDiv((FloorDiv(s1, 2) - 1), 8) - 3, 1))
dim_constraints.add(
Ne(
64 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576 * (FloorDiv(s0, 2)),
256,
)
)
dim_constraints.add(
Eq(
64
* (
Mod(
(FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9 * (FloorDiv(s0, 2)),
4,
)
),
0,
)
)
dim_constraints.add(
Eq(
FloorDiv(s0, 2),
FloorDiv(
(
(FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9 * (FloorDiv(s0, 2))
),
4,
),
)
)
dim_constraints.add(
Eq(
FloorDiv(
(
(FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9 * (FloorDiv(s0, 2))
),
4,
),
FloorDiv(s0, 2),
)
)
dim_constraints.add(
Ne(64 * (Mod(FloorDiv((FloorDiv(s1, 2) - 1), 8) + 1, 4)), 0)
)
dim_constraints.add(
Eq(
64
* (
Mod(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 1,
4,
)
),
0,
)
)
dim_constraints.add(
64 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576 * (FloorDiv(s0, 2))
> 0
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9
>= 1
)
dim_constraints.add(
Eq(
64 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 384 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 576,
256,
)
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 360 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 540
<= 2147483647
)
dim_constraints.add(
Ne(
60 * (FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 360 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 540 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
1
< 60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 360 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 540
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9
<= 2147483647
)
dim_constraints.add(
Ne(
(FloorDiv(s0, 2)) * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv(s0, 2) * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9 * (FloorDiv(s0, 2)),
0,
)
)
dim_constraints.add(
1
< (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9
)
dim_constraints.add(
(FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 6 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 9
> 1
)
dim_constraints.add(
60 * (FloorDiv((FloorDiv(s1, 2) - 1), 8)) ** 2
- 360 * FloorDiv((FloorDiv(s1, 2) - 1), 8)
+ 540
> 1
)
dim_constraints.add(s0 >= 2)
dim_constraints.add(s1 >= 2)
dim_constraints.add(s6 >= 2)
dim_constraints.add(s5 >= 2)
# Reduce the accumulated symbolic guards to their minimal, user-facing form.
# After this call the solver has partitioned every input dimension into either
# a forced specialization (static) or a residual symbolic relation (dynamic).
dim_constraints.solve()
# Dimensions the solver proved must be constant: each is reported as an exact
# equality on the traced input sizes.
# NOTE(review): _static_results / _dynamic_results are private attributes of
# DimConstraints — presumed to be sets of rendered guard strings; confirm
# against the DimConstraints implementation before relying on the format.
self.assertEqual(
    dim_constraints._static_results,
    {
        "L['c'].size()[0] == 8",
        "L['d'].size()[0] == 8",
        "L['a'].size()[2] == 96",
        "L['f'].size()[1] == 1",
        "L['a'].size()[3] == 96",
        "L['b'].size()[2] == 3",
        "L['b'].size()[1] == 22",
        "L['b'].size()[0] == 8",
        "L['a'].size()[1] == 22",
        "L['a'].size()[0] == 8",
    },
)
# Dimensions that remain genuinely dynamic: a lower bound on one dimension and
# equalities tying the other inputs' dims to it.
self.assertEqual(
    dim_constraints._dynamic_results,
    {
        "2 <= L['c'].size()[1]",
        "L['d'].size()[1] == L['c'].size()[1]",
        "L['e'].size()[1] == L['c'].size()[1]",
    },
)