language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | facebook__pyre-check | stubs/integration_test/fixture_source/integration_test/overrides.py | {
"start": 303,
"end": 547
} | class ____(Iterable[T]):
def __iter__(self):
return source()
def issue_with_direct_call_of_subclass(mi: MyIterable[int]):
eval(mi.__iter__())
def no_issue_with_iterable_call(mi: Iterable[int]):
eval(mi.__iter__())
| MyIterable |
python | django__django | tests/admin_views/test_history_view.py | {
"start": 1894,
"end": 4541
} | class ____(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super",
password="secret",
email="super@example.com",
)
for i in range(1, 1101):
LogEntry.objects.log_actions(
self.superuser.pk,
[self.superuser],
CHANGE,
change_message=f"Changed something {i}",
)
self.admin_login(
username="super",
password="secret",
login_url=reverse("admin:index"),
)
def test_pagination(self):
from selenium.webdriver.common.by import By
user_history_url = reverse("admin:auth_user_history", args=(self.superuser.pk,))
self.selenium.get(self.live_server_url + user_history_url)
paginator = self.selenium.find_element(By.CSS_SELECTOR, ".paginator")
self.assertEqual(paginator.tag_name, "nav")
labelledby = paginator.get_attribute("aria-labelledby")
description = self.selenium.find_element(By.CSS_SELECTOR, "#%s" % labelledby)
self.assertHTMLEqual(
description.get_attribute("outerHTML"),
'<h2 id="pagination" class="visually-hidden">Pagination user entries</h2>',
)
self.assertTrue(paginator.is_displayed())
aria_current_link = paginator.find_elements(By.CSS_SELECTOR, "[aria-current]")
self.assertEqual(len(aria_current_link), 1)
# The current page.
current_page_link = aria_current_link[0]
self.assertEqual(current_page_link.get_attribute("aria-current"), "page")
self.assertEqual(current_page_link.get_attribute("href"), "")
self.assertIn("%s entries" % LogEntry.objects.count(), paginator.text)
self.assertIn(str(Paginator.ELLIPSIS), paginator.text)
self.assertEqual(current_page_link.text, "1")
# The last page.
last_page_link = self.selenium.find_element(By.XPATH, "//ul/li[last()]/a")
self.assertTrue(last_page_link.text, "20")
# Select the second page.
pages = paginator.find_elements(By.TAG_NAME, "a")
second_page_link = pages[1]
self.assertEqual(second_page_link.text, "2")
second_page_link.click()
self.assertIn("?p=2", self.selenium.current_url)
rows = self.selenium.find_elements(By.CSS_SELECTOR, "#change-history tbody tr")
self.assertIn("Changed something 101", rows[0].text)
self.assertIn("Changed something 200", rows[-1].text)
| SeleniumTests |
python | openai__openai-python | src/openai/types/fine_tuning/reinforcement_hyperparameters.py | {
"start": 240,
"end": 1426
} | class ____(BaseModel):
batch_size: Union[Literal["auto"], int, None] = None
"""Number of examples in each batch.
A larger batch size means that model parameters are updated less frequently, but
with lower variance.
"""
compute_multiplier: Union[Literal["auto"], float, None] = None
"""
Multiplier on amount of compute used for exploring search space during training.
"""
eval_interval: Union[Literal["auto"], int, None] = None
"""The number of training steps between evaluation runs."""
eval_samples: Union[Literal["auto"], int, None] = None
"""Number of evaluation samples to generate per training step."""
learning_rate_multiplier: Union[Literal["auto"], float, None] = None
"""Scaling factor for the learning rate.
A smaller learning rate may be useful to avoid overfitting.
"""
n_epochs: Union[Literal["auto"], int, None] = None
"""The number of epochs to train the model for.
An epoch refers to one full cycle through the training dataset.
"""
reasoning_effort: Optional[Literal["default", "low", "medium", "high"]] = None
"""Level of reasoning effort."""
| ReinforcementHyperparameters |
python | kamyu104__LeetCode-Solutions | Python/count-connected-components-in-lcm-graph.py | {
"start": 781,
"end": 1522
} | class ____(object):
def countComponents(self, nums, threshold):
"""
:type nums: List[int]
:type threshold: int
:rtype: int
"""
uf = UnionFind(threshold)
lookup = [-1]*threshold
result = len(nums)
for x in nums:
if x-1 >= threshold:
continue
for i in xrange(x, threshold+1, x):
if lookup[i-1] == -1:
lookup[i-1] = x-1
continue
if uf.union_set(lookup[i-1], x-1):
result -= 1
if i == x:
break
return result
# Time: O(n + tlogt), t = threshold
# Space: O(t)
# union find, number theory
| Solution |
python | python__mypy | mypyc/ir/func_ir.py | {
"start": 8024,
"end": 15944
} | class ____:
"""Intermediate representation of a function with contextual information.
Unlike FuncDecl, this includes the IR of the body (basic blocks).
"""
def __init__(
self,
decl: FuncDecl,
arg_regs: list[Register],
blocks: list[BasicBlock],
line: int = -1,
traceback_name: str | None = None,
) -> None:
# Declaration of the function, including the signature
self.decl = decl
# Registers for all the arguments to the function
self.arg_regs = arg_regs
# Body of the function
self.blocks = blocks
self.decl.line = line
# The name that should be displayed for tracebacks that
# include this function. Function will be omitted from
# tracebacks if None.
self.traceback_name = traceback_name
@property
def line(self) -> int:
return self.decl.line
@property
def args(self) -> Sequence[RuntimeArg]:
return self.decl.sig.args
@property
def ret_type(self) -> RType:
return self.decl.sig.ret_type
@property
def class_name(self) -> str | None:
return self.decl.class_name
@property
def sig(self) -> FuncSignature:
return self.decl.sig
@property
def name(self) -> str:
return self.decl.name
@property
def fullname(self) -> str:
return self.decl.fullname
@property
def id(self) -> str:
return self.decl.id
@property
def internal(self) -> bool:
return self.decl.internal
def cname(self, names: NameGenerator) -> str:
return self.decl.cname(names)
def __repr__(self) -> str:
if self.class_name:
return f"<FuncIR {self.class_name}.{self.name}>"
else:
return f"<FuncIR {self.name}>"
def serialize(self) -> JsonDict:
# We don't include blocks in the serialized version
return {
"decl": self.decl.serialize(),
"line": self.line,
"traceback_name": self.traceback_name,
}
@classmethod
def deserialize(cls, data: JsonDict, ctx: DeserMaps) -> FuncIR:
return FuncIR(
FuncDecl.deserialize(data["decl"], ctx), [], [], data["line"], data["traceback_name"]
)
INVALID_FUNC_DEF: Final = FuncDef("<INVALID_FUNC_DEF>", [], Block([]))
def all_values(args: list[Register], blocks: list[BasicBlock]) -> list[Value]:
"""Return the set of all values that may be initialized in the blocks.
This omits registers that are only read.
"""
values: list[Value] = list(args)
seen_registers = set(args)
for block in blocks:
for op in block.ops:
if not isinstance(op, ControlOp):
if isinstance(op, (Assign, AssignMulti)):
if op.dest not in seen_registers:
values.append(op.dest)
seen_registers.add(op.dest)
elif op.is_void:
continue
else:
# If we take the address of a register, it might get initialized.
if (
isinstance(op, LoadAddress)
and isinstance(op.src, Register)
and op.src not in seen_registers
):
values.append(op.src)
seen_registers.add(op.src)
values.append(op)
return values
def all_values_full(args: list[Register], blocks: list[BasicBlock]) -> list[Value]:
"""Return set of all values that are initialized or accessed."""
values: list[Value] = list(args)
seen_registers = set(args)
for block in blocks:
for op in block.ops:
for source in op.sources():
# Look for uninitialized registers that are accessed. Ignore
# non-registers since we don't allow ops outside basic blocks.
if isinstance(source, Register) and source not in seen_registers:
values.append(source)
seen_registers.add(source)
if not isinstance(op, ControlOp):
if isinstance(op, (Assign, AssignMulti)):
if op.dest not in seen_registers:
values.append(op.dest)
seen_registers.add(op.dest)
elif op.is_void:
continue
else:
values.append(op)
return values
_ARG_KIND_TO_INSPECT: Final = {
ArgKind.ARG_POS: inspect.Parameter.POSITIONAL_OR_KEYWORD,
ArgKind.ARG_OPT: inspect.Parameter.POSITIONAL_OR_KEYWORD,
ArgKind.ARG_STAR: inspect.Parameter.VAR_POSITIONAL,
ArgKind.ARG_NAMED: inspect.Parameter.KEYWORD_ONLY,
ArgKind.ARG_STAR2: inspect.Parameter.VAR_KEYWORD,
ArgKind.ARG_NAMED_OPT: inspect.Parameter.KEYWORD_ONLY,
}
# Sentinel indicating a value that cannot be represented in a text signature.
_NOT_REPRESENTABLE = object()
def get_text_signature(fn: FuncIR, *, bound: bool = False) -> str | None:
"""Return a text signature in CPython's internal doc format, or None
if the function's signature cannot be represented.
"""
parameters = []
mark_self = (fn.class_name is not None) and (fn.decl.kind != FUNC_STATICMETHOD) and not bound
sig = fn.decl.bound_sig if bound and fn.decl.bound_sig is not None else fn.decl.sig
# Pre-scan for end of positional-only parameters.
# This is needed to handle signatures like 'def foo(self, __x)', where mypy
# currently sees 'self' as being positional-or-keyword and '__x' as positional-only.
pos_only_idx = -1
for idx, arg in enumerate(sig.args):
if arg.pos_only and arg.kind in (ArgKind.ARG_POS, ArgKind.ARG_OPT):
pos_only_idx = idx
for idx, arg in enumerate(sig.args):
if arg.name.startswith(("__bitmap", "__mypyc")):
continue
kind = (
inspect.Parameter.POSITIONAL_ONLY
if idx <= pos_only_idx
else _ARG_KIND_TO_INSPECT[arg.kind]
)
default: object = inspect.Parameter.empty
if arg.optional:
default = _find_default_argument(arg.name, fn.blocks)
if default is _NOT_REPRESENTABLE:
# This default argument cannot be represented in a __text_signature__
return None
curr_param = inspect.Parameter(arg.name, kind, default=default)
parameters.append(curr_param)
if mark_self:
# Parameter.__init__/Parameter.replace do not accept $
curr_param._name = f"${arg.name}" # type: ignore[attr-defined]
mark_self = False
return f"{fn.name}{inspect.Signature(parameters)}"
def _find_default_argument(name: str, blocks: list[BasicBlock]) -> object:
# Find assignment inserted by gen_arg_defaults. Assumed to be the first assignment.
for block in blocks:
for op in block.ops:
if isinstance(op, Assign) and op.dest.name == name:
return _extract_python_literal(op.src)
return _NOT_REPRESENTABLE
def _extract_python_literal(value: Value) -> object:
if isinstance(value, Integer):
if is_none_rprimitive(value.type):
return None
val = value.numeric_value()
if is_bool_rprimitive(value.type):
return bool(val)
return val
elif isinstance(value, Float):
return value.value
elif isinstance(value, LoadLiteral):
return value.value
elif isinstance(value, Box):
return _extract_python_literal(value.src)
elif isinstance(value, TupleSet):
items = tuple(_extract_python_literal(item) for item in value.items)
if any(itm is _NOT_REPRESENTABLE for itm in items):
return _NOT_REPRESENTABLE
return items
return _NOT_REPRESENTABLE
| FuncIR |
python | openai__openai-python | src/openai/types/audio/transcription_text_delta_event.py | {
"start": 249,
"end": 564
} | class ____(BaseModel):
token: Optional[str] = None
"""The token that was used to generate the log probability."""
bytes: Optional[List[int]] = None
"""The bytes that were used to generate the log probability."""
logprob: Optional[float] = None
"""The log probability of the token."""
| Logprob |
python | apache__airflow | airflow-core/tests/unit/utils/test_file.py | {
"start": 3366,
"end": 11412
} | class ____:
@pytest.fixture
def test_dir(self, tmp_path):
# create test tree with symlinks
source = os.path.join(tmp_path, "folder")
target = os.path.join(tmp_path, "symlink")
py_file = os.path.join(source, "hello_world.py")
ignore_file = os.path.join(tmp_path, ".airflowignore")
os.mkdir(source)
os.symlink(source, target)
# write ignore files
with open(ignore_file, "w") as f:
f.write("folder")
# write sample pyfile
with open(py_file, "w") as f:
f.write("print('hello world')")
return tmp_path
def test_find_path_from_directory_regex_ignore(self):
should_ignore = [
"test_invalid_cron.py",
"test_invalid_param.py",
"test_ignore_this.py",
]
files = find_path_from_directory(TEST_DAGS_FOLDER, ".airflowignore")
assert files
assert all(os.path.basename(file) not in should_ignore for file in files)
def test_find_path_from_directory_glob_ignore(self):
should_ignore = {
"should_ignore_this.py",
"test_explicit_ignore.py",
"test_invalid_cron.py",
"test_invalid_param.py",
"test_ignore_this.py",
"test_prev_dagrun_dep.py",
"test_nested_dag.py",
".airflowignore",
}
should_not_ignore = {
"test_on_kill.py",
"test_negate_ignore.py",
"test_dont_ignore_this.py",
"test_nested_negate_ignore.py",
"test_explicit_dont_ignore.py",
}
actual_files = list(find_path_from_directory(TEST_DAGS_FOLDER, ".airflowignore_glob", "glob"))
assert actual_files
assert all(os.path.basename(file) not in should_ignore for file in actual_files)
actual_included_filenames = set(
[os.path.basename(f) for f in actual_files if os.path.basename(f) in should_not_ignore]
)
assert actual_included_filenames == should_not_ignore, (
f"actual_included_filenames: {pformat(actual_included_filenames)}\nexpected_included_filenames: {pformat(should_not_ignore)}"
)
def test_find_path_from_directory_respects_symlinks_regexp_ignore(self, test_dir):
ignore_list_file = ".airflowignore"
found = list(find_path_from_directory(test_dir, ignore_list_file))
assert os.path.join(test_dir, "symlink", "hello_world.py") in found
assert os.path.join(test_dir, "folder", "hello_world.py") not in found
def test_find_path_from_directory_respects_symlinks_glob_ignore(self, test_dir):
ignore_list_file = ".airflowignore"
found = list(find_path_from_directory(test_dir, ignore_list_file, ignore_file_syntax="glob"))
assert os.path.join(test_dir, "symlink", "hello_world.py") in found
assert os.path.join(test_dir, "folder", "hello_world.py") not in found
def test_find_path_from_directory_fails_on_recursive_link(self, test_dir):
# add a recursive link
recursing_src = os.path.join(test_dir, "folder2", "recursor")
recursing_tgt = os.path.join(test_dir, "folder2")
os.mkdir(recursing_tgt)
os.symlink(recursing_tgt, recursing_src)
ignore_list_file = ".airflowignore"
error_message = (
f"Detected recursive loop when walking DAG directory {test_dir}: "
f"{Path(recursing_tgt).resolve()} has appeared more than once."
)
with pytest.raises(RuntimeError, match=error_message):
list(find_path_from_directory(test_dir, ignore_list_file, ignore_file_syntax="glob"))
def test_might_contain_dag_with_default_callable(self):
file_path_with_dag = os.path.join(TEST_DAGS_FOLDER, "test_scheduler_dags.py")
assert file_utils.might_contain_dag(file_path=file_path_with_dag, safe_mode=True)
@conf_vars({("core", "might_contain_dag_callable"): "unit.utils.test_file.might_contain_dag"})
def test_might_contain_dag(self):
"""Test might_contain_dag_callable"""
file_path_with_dag = os.path.join(TEST_DAGS_FOLDER, "test_scheduler_dags.py")
# There is a DAG defined in the file_path_with_dag, however, the might_contain_dag_callable
# returns False no matter what, which is used to test might_contain_dag_callable actually
# overrides the default function
assert not file_utils.might_contain_dag(file_path=file_path_with_dag, safe_mode=True)
# With safe_mode is False, the user defined callable won't be invoked
assert file_utils.might_contain_dag(file_path=file_path_with_dag, safe_mode=False)
def test_get_modules(self):
file_path = os.path.join(TEST_DAGS_FOLDER, "test_imports.py")
modules = list(file_utils.iter_airflow_imports(file_path))
assert len(modules) == 4
assert "airflow.utils" in modules
assert "airflow.decorators" in modules
assert "airflow.models" in modules
assert "airflow.sensors" in modules
# this one is a local import, we don't want it.
assert "airflow.local_import" not in modules
# this one is in a comment, we don't want it
assert "airflow.in_comment" not in modules
# we don't want imports under conditions
assert "airflow.if_branch" not in modules
assert "airflow.else_branch" not in modules
def test_get_modules_from_invalid_file(self):
file_path = os.path.join(TEST_DAGS_FOLDER, "README.md") # just getting a non-python file
# should not error
modules = list(file_utils.iter_airflow_imports(file_path))
assert len(modules) == 0
def test_list_py_file_paths(self, test_zip_path):
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = {
"no_dags.py",
"should_ignore_this.py",
"test_explicit_ignore.py",
"test_invalid_cron.py",
"test_invalid_dup_task.py",
"test_ignore_this.py",
"test_invalid_param.py",
"test_invalid_param2.py",
"test_invalid_param3.py",
"test_invalid_param4.py",
"test_nested_dag.py",
"test_imports.py",
"test_nested_negate_ignore.py",
"file_no_airflow_dag.py", # no_dag test case in test_zip folder
"test.py", # no_dag test case in test_zip_module folder
"__init__.py",
}
for root, _, files in os.walk(TEST_DAG_FOLDER):
for file_name in files:
if file_name.endswith((".py", ".zip")):
if file_name not in ignored_files:
expected_files.add(f"{root}/{file_name}")
detected_files = set(list_py_file_paths(TEST_DAG_FOLDER))
assert detected_files == expected_files, (
f"Detected files mismatched expected files:\ndetected_files: {pformat(detected_files)}\nexpected_files: {pformat(expected_files)}"
)
@pytest.mark.parametrize(
("edge_filename", "expected_modification"),
[
("test_dag.py", "unusual_prefix_mocked_path_hash_sha1_test_dag"),
("test-dag.py", "unusual_prefix_mocked_path_hash_sha1_test_dag"),
("test-dag-1.py", "unusual_prefix_mocked_path_hash_sha1_test_dag_1"),
("test-dag_1.py", "unusual_prefix_mocked_path_hash_sha1_test_dag_1"),
("test-dag.dev.py", "unusual_prefix_mocked_path_hash_sha1_test_dag_dev"),
("test_dag.prod.py", "unusual_prefix_mocked_path_hash_sha1_test_dag_prod"),
],
)
def test_get_unique_dag_module_name(edge_filename, expected_modification):
with mock.patch("hashlib.sha1") as mocked_sha1:
mocked_sha1.return_value.hexdigest.return_value = "mocked_path_hash_sha1"
modify_module_name = file_utils.get_unique_dag_module_name(edge_filename)
assert modify_module_name == expected_modification
| TestListPyFilesPath |
python | tensorflow__tensorflow | tensorflow/python/distribute/remote_mirrored_strategy_eager_test.py | {
"start": 1574,
"end": 2348
} | class ____(
multi_worker_test_base.SingleWorkerTestBaseEager,
strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):
def _get_num_gpus(self):
return len(get_gpus())
def testNumReplicasInSync(self, distribution):
self._testNumReplicasInSync(distribution)
def testMinimizeLoss(self, distribution):
self._testMinimizeLoss(distribution)
def testDeviceScope(self, distribution):
self._testDeviceScope(distribution)
def testMakeInputFnIteratorWithDataset(self, distribution):
self._testMakeInputFnIteratorWithDataset(distribution)
def testMakeInputFnIteratorWithCallable(self, distribution):
self._testMakeInputFnIteratorWithCallable(distribution)
if __name__ == "__main__":
test.main()
| RemoteSingleWorkerMirroredStrategyEager |
python | lazyprogrammer__machine_learning_examples | rl2/atari/dqn_tf.py | {
"start": 1024,
"end": 1793
} | class ____:
def __init__(self):
with tf.variable_scope("image_transformer"):
self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)
self.output = tf.image.rgb_to_grayscale(self.input_state)
self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160)
self.output = tf.image.resize_images(
self.output,
[IM_SIZE, IM_SIZE],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
self.output = tf.squeeze(self.output)
def transform(self, state, sess=None):
sess = sess or tf.get_default_session()
return sess.run(self.output, { self.input_state: state })
def update_state(state, obs_small):
return np.append(state[:,:,1:], np.expand_dims(obs_small, 2), axis=2)
| ImageTransformer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_matmul_op_test.py | {
"start": 4688,
"end": 6790
} | class ____(test.TestCase):
def _testGradients(self, tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype, delta,
name):
with self.cached_session():
a = constant_op.constant(
RandMatrix(
3, 2, tr_a, round_bfloat=True), dtype=dtypes.float32)
b = constant_op.constant(
RandMatrix(
2, 4, tr_b, round_bfloat=True), dtype=dtypes.float32)
tf_a = math_ops.cast(a, a_dtype) if a_dtype != dtypes.float32 else a
tf_b = math_ops.cast(b, b_dtype) if b_dtype != dtypes.float32 else b
m = math_ops.matmul(
tf_a,
tf_b,
name=name,
transpose_a=tr_a,
transpose_b=tr_b,
a_is_sparse=sp_a,
b_is_sparse=sp_b)
err = (
gradient_checker.compute_gradient_error(
a, [2, 3] if tr_a else [3, 2],
m, [3, 4],
x_init_value=self.evaluate(a),
delta=delta) + gradient_checker.compute_gradient_error(
b, [4, 2] if tr_b else [2, 4],
m, [3, 4],
x_init_value=self.evaluate(b),
delta=delta))
self.assertLessEqual(err, delta / 2.)
@test_util.run_deprecated_v1
def testGradientInput(self):
for tr_a in [True, False]:
for tr_b in [True, False]:
for sp_a in [True, False]:
for sp_b in [True, False]:
for a_dtype in (dtypes.float32, dtypes.bfloat16):
for b_dtype in (dtypes.float32, dtypes.bfloat16):
# Note: bfloat16 only has 7 mantissa bits, versus float32 with
# 10. Hence, we shift by 2 bits to pass the test.
if a_dtype == dtypes.bfloat16 and b_dtype == dtypes.bfloat16:
delta = 1 / 16.
else:
delta = 1 / 64.
name = "sparse_matmul_%s_%s_%s_%s" % (tr_a, tr_b, sp_a, sp_b)
self._testGradients(tr_a, tr_b, sp_a, sp_b, a_dtype, b_dtype,
delta, name)
if __name__ == "__main__":
test.main()
| MatMulGradientTest |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 48080,
"end": 55564
} | class ____(_LocalFixture):
__sparse_driver_backend__ = True
@testing.requires.savepoints
def test_savepoint_rollback(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
u2 = User(name="jack")
s.add_all([u1, u2])
nt1 = s.begin_nested()
u3 = User(name="wendy")
u4 = User(name="foo")
u1.name = "edward"
u2.name = "jackward"
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
nt1.rollback()
assert u1.name == "ed"
assert u2.name == "jack"
eq_(s.query(User.name).order_by(User.id).all(), [("ed",), ("jack",)])
s.commit()
assert u1.name == "ed"
assert u2.name == "jack"
eq_(s.query(User.name).order_by(User.id).all(), [("ed",), ("jack",)])
@testing.requires.savepoints
def test_savepoint_delete(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name="ed").count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name="ed").count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
u2 = User(name="jack")
s.add_all([u1, u2])
nt1 = s.begin_nested()
u3 = User(name="wendy")
u4 = User(name="foo")
u1.name = "edward"
u2.name = "jackward"
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
nt1.commit()
def go():
assert u1.name == "edward"
assert u2.name == "jackward"
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(
s.query(User.name).order_by(User.id).all(),
[("edward",), ("jackward",), ("wendy",), ("foo",)],
)
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
u1.name = "edward"
u1.addresses.append(Address(email_address="bar"))
nt1 = s.begin_nested()
u2 = User(name="jack", addresses=[Address(email_address="bat")])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
nt1.rollback()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
)
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
)
],
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
u1.name = "edward"
u1.addresses.append(Address(email_address="bar"))
s.begin_nested()
u2 = User(name="jack", addresses=[Address(email_address="bat")])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name="edward",
addresses=[
Address(email_address="foo"),
Address(email_address="bar"),
],
),
User(name="jack", addresses=[Address(email_address="bat")]),
],
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = fixture_session()
sess.begin_nested()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
@testing.requires.savepoints_w_release
def test_savepoint_lost_still_runs(self):
User = self.classes.User
s = fixture_session()
trans = s.begin_nested()
s.connection()
u1 = User(name="ed")
s.add(u1)
# kill off the transaction
nested_trans = trans._connections[self.bind][1]
nested_trans._do_commit()
is_(s.get_nested_transaction(), trans)
with expect_warnings("nested transaction already deassociated"):
# this previously would raise
# "savepoint "sa_savepoint_1" does not exist", however as of
# #5327 the savepoint already knows it's inactive
s.rollback()
assert u1 not in s.new
is_(trans._state, _session.CLOSED)
is_not(s.get_transaction(), trans)
s.connection()
is_(s.get_transaction()._state, _session.ACTIVE)
is_(s.get_transaction().nested, False)
is_(s.get_transaction()._parent, None)
| SavepointTest |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_bytes.py | {
"start": 3168,
"end": 5515
} | class ____(BaseTestZDType):
test_cls = VariableLengthBytes
valid_dtype = (np.dtype("|O"),)
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype("|U10"),
)
valid_json_v2 = ({"name": "|O", "object_codec_id": "vlen-bytes"},)
valid_json_v3 = ("variable_length_bytes",)
invalid_json_v2 = (
"|S",
"|U10",
"|f8",
)
invalid_json_v3 = (
{"name": "fixed_length_ascii", "configuration": {"length_bits": 0}},
{"name": "numpy.fixed_length_ascii", "configuration": {"length_bits": "invalid"}},
)
scalar_v2_params = (
(VariableLengthBytes(), ""),
(VariableLengthBytes(), "YWI="),
(VariableLengthBytes(), "YWJjZA=="),
)
scalar_v3_params = (
(VariableLengthBytes(), ""),
(VariableLengthBytes(), "YWI="),
(VariableLengthBytes(), "YWJjZA=="),
)
cast_value_params = (
(VariableLengthBytes(), "", b""),
(VariableLengthBytes(), "ab", b"ab"),
(VariableLengthBytes(), "abcdefg", b"abcdefg"),
)
invalid_scalar_params = ((VariableLengthBytes(), 1.0),)
item_size_params = (VariableLengthBytes(),)
def test_vlen_bytes_alias() -> None:
"""Test that "bytes" is an accepted alias for "variable_length_bytes" in JSON metadata"""
a = VariableLengthBytes.from_json("bytes", zarr_format=3)
b = VariableLengthBytes.from_json("variable_length_bytes", zarr_format=3)
assert a == b
@pytest.mark.parametrize(
"zdtype", [NullTerminatedBytes(length=10), RawBytes(length=10), VariableLengthBytes()]
)
def test_unstable_dtype_warning(
zdtype: NullTerminatedBytes | RawBytes | VariableLengthBytes,
) -> None:
"""
Test that we get a warning when serializing a dtype without a zarr v3 spec to json
when zarr_format is 3
"""
with pytest.warns(UnstableSpecificationWarning):
zdtype.to_json(zarr_format=3)
@pytest.mark.parametrize("zdtype_cls", [NullTerminatedBytes, RawBytes])
def test_invalid_size(zdtype_cls: type[NullTerminatedBytes] | type[RawBytes]) -> None:
"""
Test that it's impossible to create a data type that has no length
"""
length = 0
msg = f"length must be >= 1, got {length}."
with pytest.raises(ValueError, match=msg):
zdtype_cls(length=length)
| TestVariableLengthBytes |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_engine.py | {
"start": 1059,
"end": 13854
} | class ____(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://mydsn")
connection = dialect.create_connect_args(u)
eq_((("dsn=mydsn;Trusted_Connection=Yes",), {}), connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc:///?dsn=mydsn")
connection = dialect.create_connect_args(u)
eq_((("dsn=mydsn;Trusted_Connection=Yes",), {}), connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://username:password@mydsn")
connection = dialect.create_connect_args(u)
eq_((("dsn=mydsn;UID=username;PWD=password",), {}), connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@mydsn/?LANGUAGE=us_"
"english&foo=bar"
)
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_hostname(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?driver=SQL+Server" # noqa
)
connection = dialect.create_connect_args(u)
eq_(
(
(
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password",
),
{},
),
connection,
)
def test_pyodbc_empty_url_no_warning(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://")
# no warning is emitted
dialect.create_connect_args(u)
def test_pyodbc_host_no_driver(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql+pyodbc://username:password@hostspec/database")
def go():
return dialect.create_connect_args(u)
connection = assert_warnings(
go,
[
"No driver name specified; this is expected by "
"PyODBC when using DSN-less connections"
],
)
eq_(
(
(
"Server=hostspec;Database=database;UI"
"D=username;PWD=password",
),
{},
),
connection,
)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec:12345/data"
"base?driver=SQL Server"
)
connection = dialect.create_connect_args(u)
eq_(
(
(
"DRIVER={SQL Server};Server=hostspec,12345;Database=datab"
"ase;UID=username;PWD=password",
),
{},
),
connection,
)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?p"
"ort=12345&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(
(
(
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password;port=12345",
),
{},
),
connection,
)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://username:password@hostspec/database?L"
"ANGUAGE=us_english&foo=bar&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(
connection[0][0]
in (
"DRIVER={SQL Server};Server=hostspec;Database=database;"
"UID=username;PWD=password;foo=bar;LANGUAGE=us_english",
"DRIVER={SQL Server};Server=hostspec;Database=database;UID="
"username;PWD=password;LANGUAGE=us_english;foo=bar",
),
True,
)
def test_pyodbc_extra_connect_azure(self):
# issue #5592
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://@server_name/db_name?"
"driver=ODBC+Driver+17+for+SQL+Server&"
"authentication=ActiveDirectoryIntegrated"
)
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(
connection[0][0]
in (
"DRIVER={ODBC Driver 17 for SQL Server};"
"Server=server_name;Database=db_name;"
"Authentication=ActiveDirectoryIntegrated",
),
True,
)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc:///?odbc_connect=DRIVER%3D%7BSQL+Server"
"%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase"
"%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
(
(
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password",
),
{},
),
connection,
)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase"
"%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
(("dsn=mydsn;Database=database;UID=username;PWD=password",), {}),
connection,
)
@testing.combinations(
(
"quoted_plus",
(
"mssql+pyodbc:///?odbc_connect=DSN%3Dmydsn%3B"
"UID%3Ded%3BPWD%3Dpass%2Bword"
),
"DSN=mydsn;UID=ed;PWD=pass+word",
("DSN=mydsn;UID=ed;PWD=pass+word",),
"",
),
(
"plus_for_space",
(
"mssql+pyodbc:///?odbc_connect=DSN%3Dmydsn%3B"
"UID%3Ded%3BPWD%3Dpass+word"
),
"DSN=mydsn;UID=ed;PWD=pass word",
("DSN=mydsn;UID=ed;PWD=pass word",),
"",
),
(
"issue_11250_breaking_change",
(
"mssql+pyodbc:///?odbc_connect=DSN%3Dmydsn%3B"
"UID%3Ded%3BPWD%3Dpass%252Bword"
),
"DSN=mydsn;UID=ed;PWD=pass%2Bword",
("DSN=mydsn;UID=ed;PWD=pass%2Bword",),
"pre-11250 would unquote_plus() to PWD=pass+word",
),
argnames="quoted_url, value_in_url_object, connection_string",
id_="iaaai",
)
def test_pyodbc_odbc_connect_with_pwd_plus(
self, quoted_url, value_in_url_object, connection_string
):
dialect = pyodbc.dialect()
u = url.make_url(quoted_url)
eq_(value_in_url_object, u.query["odbc_connect"])
connection = dialect.create_connect_args(u)
eq_(
(
(connection_string),
{},
),
connection,
)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql+pyodbc://userdiff:passdiff@localhost/dbdiff?od"
"bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer"
"%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse"
"rname%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
(
(
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password",
),
{},
),
connection,
)
@testing.combinations(
(
"original",
(
"someuser%3BPORT%3D50001",
"some{strange}pw%3BPORT%3D50001",
"somehost%3BPORT%3D50001",
"somedb%3BPORT%3D50001",
),
(
"DRIVER={foob};Server=somehost%3BPORT%3D50001;"
"Database={somedb;PORT=50001};UID={someuser;PORT=50001};"
"PWD={some{strange}}pw;PORT=50001}",
),
),
(
"issue_8062",
(
"larry",
"{moe",
"localhost",
"mydb",
),
(
"DRIVER={foob};Server=localhost;"
"Database=mydb;UID=larry;"
"PWD={{moe}",
),
),
argnames="tokens, connection_string",
id_="iaa",
)
def test_pyodbc_token_injection(self, tokens, connection_string):
u = url.make_url("mssql+pyodbc://%s:%s@%s/%s?driver=foob" % tokens)
dialect = pyodbc.dialect()
connection = dialect.create_connect_args(u)
eq_(
(
connection_string,
{},
),
connection,
)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = url.make_url("mssql+pymssql://scott:tiger@somehost/test")
connection = dialect.create_connect_args(u)
eq_(
(
[],
{
"host": "somehost",
"password": "tiger",
"user": "scott",
"database": "test",
},
),
connection,
)
u = url.make_url("mssql+pymssql://scott:tiger@somehost:5000/test")
connection = dialect.create_connect_args(u)
eq_(
(
[],
{
"host": "somehost:5000",
"password": "tiger",
"user": "scott",
"database": "test",
},
),
connection,
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003",
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
"The server failed to resume the transaction",
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
def test_pyodbc_disconnect(self):
dialect = pyodbc.dialect()
class MockDBAPIError(Exception):
pass
class MockProgrammingError(MockDBAPIError):
pass
dialect.dbapi = Mock(
Error=MockDBAPIError, ProgrammingError=MockProgrammingError
)
for error in [
MockDBAPIError(code, "[%s] some pyodbc message" % code)
for code in [
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
]
] + [
MockProgrammingError(message)
for message in [
"(some pyodbc stuff) The cursor's connection has been closed.",
"(some pyodbc stuff) Attempt to use a closed connection.",
]
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(
dialect.is_disconnect(
MockProgrammingError("Query with abc08007def failed"),
None,
None,
),
False,
)
@testing.requires.mssql_freetds
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(
exc.SAWarning, "Unrecognized server version info", engine.connect
)
| ParseConnectTest |
python | getsentry__sentry | tests/sentry/flags/endpoints/test_logs.py | {
"start": 198,
"end": 13192
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-flag-logs"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.id,))
@property
def features(self) -> dict[str, bool]:
return {}
def test_get(self) -> None:
model = FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
provider=PROVIDER_MAP["generic"],
tags={"commit_sha": "123"},
)
model.save()
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["action"] == "created"
assert "createdAt" in result["data"][0]
assert result["data"][0]["createdBy"] == "a@b.com"
assert result["data"][0]["createdByType"] == "email"
assert result["data"][0]["flag"] == "hello"
assert result["data"][0]["provider"] == "generic"
assert result["data"][0]["tags"] == {"commit_sha": "123"}
def test_get_no_provider(self) -> None:
model = FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
)
model.save()
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["action"] == "created"
assert "createdAt" in result["data"][0]
assert result["data"][0]["createdBy"] == "a@b.com"
assert result["data"][0]["createdByType"] == "email"
assert result["data"][0]["flag"] == "hello"
assert result["data"][0]["provider"] is None
assert result["data"][0]["tags"] == {"commit_sha": "123"}
def test_get_no_created_by(self) -> None:
model = FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by=None,
created_by_type=None,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
)
model.save()
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["action"] == "created"
assert "createdAt" in result["data"][0]
assert result["data"][0]["createdBy"] is None
assert result["data"][0]["createdByType"] is None
assert result["data"][0]["flag"] == "hello"
assert result["data"][0]["tags"] == {"commit_sha": "123"}
def test_get_filter_by_flag(self) -> None:
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={},
).save()
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="world",
organization_id=self.organization.id,
tags={},
).save()
with self.feature(self.features):
response = self.client.get(self.url + "?flag=world")
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["flag"] == "world"
response = self.client.get(self.url + "?flag=world&flag=hello")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
response = self.client.get(self.url + "?flag=blahblah")
assert response.status_code == 200
assert len(response.json()["data"]) == 0
def test_get_filter_by_provider(self) -> None:
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc) - timedelta(days=1),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
provider=PROVIDER_MAP["statsig"],
tags={},
).save()
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="world",
organization_id=self.organization.id,
provider=PROVIDER_MAP["launchdarkly"],
tags={},
).save()
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="goodbye",
organization_id=self.organization.id,
tags={},
).save()
with self.feature(self.features):
response = self.client.get(self.url + "?provider=statsig")
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["flag"] == "hello"
response = self.client.get(self.url + "?provider=statsig&provider=launchdarkly")
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 2
assert result["data"][0]["flag"] == "hello"
assert result["data"][1]["flag"] == "world"
response = self.client.get(self.url + "?provider=unknown")
assert response.status_code == 200
result = response.json()
assert len(result["data"]) == 1
assert result["data"][0]["flag"] == "goodbye"
# Invalid provider
response = self.client.get(self.url + "?provider=blahblah")
assert response.status_code == 400
def test_get_unauthorized_organization(self) -> None:
org = self.create_organization()
url = reverse(self.endpoint, args=(org.id,))
with self.feature(self.features):
response = self.client.get(url)
assert response.status_code == 403
def test_get_stats_period(self) -> None:
model = FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
)
model.save()
with self.feature(self.features):
response = self.client.get(self.url + "?statsPeriod=14d")
assert response.status_code == 200
assert len(response.json()["data"]) == 1
def test_get_start_end(self) -> None:
model = FlagAuditLogModel(
action=0,
created_at=datetime(2024, 1, 5, tzinfo=timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
)
model.save()
start = datetime(2024, 1, 4, tzinfo=timezone.utc)
end = datetime(2024, 1, 6, tzinfo=timezone.utc)
with self.feature(self.features):
response = self.client.get(
self.url + f"?start={start.timestamp()}&end={end.timestamp()}"
)
assert response.status_code == 200
assert len(response.json()["data"]) == 1
def test_get_sort(self) -> None:
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc) - timedelta(days=1),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
).save()
FlagAuditLogModel(
action=1,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="goodbye",
organization_id=self.organization.id,
tags={},
).save()
with self.feature(self.features):
response = self.client.get(self.url + "?sort=created_at")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["flag"] == "hello"
assert response.json()["data"][1]["flag"] == "goodbye"
response = self.client.get(self.url + "?sort=-created_at")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["flag"] == "goodbye"
assert response.json()["data"][1]["flag"] == "hello"
response = self.client.get(self.url + "?sort=flag")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["flag"] == "goodbye"
assert response.json()["data"][1]["flag"] == "hello"
# Camel case
response = self.client.get(self.url + "?sort=createdAt")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["flag"] == "hello"
assert response.json()["data"][1]["flag"] == "goodbye"
response = self.client.get(self.url + "?sort=-createdAt")
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["flag"] == "goodbye"
assert response.json()["data"][1]["flag"] == "hello"
# Invalid sorts
response = self.client.get(self.url + "?sort=blahblah")
assert response.status_code == 400
def test_get_sort_default_created_at(self) -> None:
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc) - timedelta(days=1),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
).save()
FlagAuditLogModel(
action=1,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={},
).save()
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.json()["data"]) == 2
assert response.json()["data"][0]["tags"].get("commit_sha") == "123"
assert response.json()["data"][1]["tags"].get("commit_sha") is None
def test_get_paginate(self) -> None:
FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc) - timedelta(days=1),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
).save()
FlagAuditLogModel(
action=1,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="goodbye",
organization_id=self.organization.id,
tags={},
).save()
with self.feature(self.features):
response = self.client.get(self.url + "?per_page=1")
assert response.status_code == 200
assert len(response.json()["data"]) == 1
assert response.json()["data"][0]["flag"] == "hello"
response = self.client.get(self.url + "?per_page=1&cursor=1:1:0")
assert response.status_code == 200
assert len(response.json()["data"]) == 1
assert response.json()["data"][0]["flag"] == "goodbye"
response = self.client.get(self.url + "?per_page=1&cursor=1:2:0")
assert response.status_code == 200
assert len(response.json()["data"]) == 0
| OrganizationFlagLogIndexEndpointTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 2930,
"end": 3038
} | class ____(Contra_TA[Contra_TA[Contra_TA[T_co]]]): ...
Ts = TypeVarTuple("Ts")
| ContraToContraToContra_WithTA |
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 13235,
"end": 13280
} | class ____(LlamaRMSNorm):
pass
| Aimv2RMSNorm |
python | realpython__materials | python-absolute-value/sample_code.py | {
"start": 95,
"end": 313
} | class ____:
def __init__(self, *coordinates):
self.coordinates = coordinates
def __abs__(self):
origin = [0] * len(self.coordinates)
return math.dist(origin, self.coordinates)
| VectorBound |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 13028,
"end": 13529
} | class ____(AbstractTemplate):
"""
Typing for float(Masked)
returns the result of calling "float" on the input
TODO: retains the validity of the input rather than
raising as in float(pd.NA)
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
# following numpy convention np.dtype(float) -> dtype('float64')
return nb_signature(MaskedType(types.float64), args[0])
@cuda_decl_registry.register_global(int)
| MaskedScalarFloatCast |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/cholesky_op_test.py | {
"start": 8136,
"end": 11928
} | class ____(test.TestCase):
_backprop_block_size = 16
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalar_test=True)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalar_test=True)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalar_test=True)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalar_test=True)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalar_test=True)
def _runOneTest(self, shape, dtype, batch, scalar_test):
if dtype == dtypes_lib.float64:
tol = 1e-5
elif dtype == dtypes_lib.complex128:
tol = 5e-5
else:
tol = 5e-3
epsilon = np.finfo(dtype.as_numpy_dtype).eps
delta = epsilon**(1.0 / 3.0)
def RandomInput():
a = np.random.randn(shape[0], shape[1]).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
a += 1j * np.random.randn(shape[0], shape[1]).astype(
dtype.as_numpy_dtype)
return a
def Compute(x):
# Turn the random matrix x into a Hermitian matrix by
# computing the quadratic form x * x^H.
a = test_util.matmul_without_tf32(
x, math_ops.conj(array_ops.matrix_transpose(x))) / shape[0]
if batch:
a = array_ops.tile(array_ops.expand_dims(a, 0), [2, 1, 1])
# Finally take the cholesky decomposition of the Hermitian matrix.
c = linalg_ops.cholesky(a)
if scalar_test:
# Reduce to a single scalar output to speed up test.
c = math_ops.reduce_mean(c)
return c
theoretical, numerical = gradient_checker_v2.compute_gradient(
Compute, [RandomInput()], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalar_test=False):
for shape_ in shapes:
for dtype_ in dtypes:
for batch_ in False, True:
self._runOneTest(shape_, dtype_, batch_, scalar_test)
| CholeskyGradTest |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/llm_checker/base.py | {
"start": 2194,
"end": 6786
} | class ____(Chain):
"""Chain for question-answering with self-verification.
Example:
```python
from langchain_openai import OpenAI
from langchain_classic.chains import LLMCheckerChain
model = OpenAI(temperature=0.7)
checker_chain = LLMCheckerChain.from_llm(model)
```
"""
question_to_checked_assertions_chain: SequentialChain
llm: BaseLanguageModel | None = None
"""[Deprecated] LLM wrapper to use."""
create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
"""[Deprecated]"""
list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
"""[Deprecated]"""
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
"""[Deprecated]"""
revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
"""[Deprecated] Prompt to use when questioning the documents."""
input_key: str = "query"
output_key: str = "result"
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def _raise_deprecation(cls, values: dict) -> Any:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMCheckerChain with an llm is deprecated. "
"Please instantiate with question_to_checked_assertions_chain "
"or using the from_llm class method.",
stacklevel=5,
)
if (
"question_to_checked_assertions_chain" not in values
and values["llm"] is not None
):
question_to_checked_assertions_chain = (
_load_question_to_checked_assertions_chain(
values["llm"],
values.get(
"create_draft_answer_prompt",
CREATE_DRAFT_ANSWER_PROMPT,
),
values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT),
values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT),
)
)
values["question_to_checked_assertions_chain"] = (
question_to_checked_assertions_chain
)
return values
@property
def input_keys(self) -> list[str]:
"""Return the singular input key."""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return the singular output key."""
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
output = self.question_to_checked_assertions_chain(
{"question": question},
callbacks=_run_manager.get_child(),
)
return {self.output_key: output["revised_statement"]}
@property
def _chain_type(self) -> str:
return "llm_checker_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT,
list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT,
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT,
**kwargs: Any,
) -> LLMCheckerChain:
"""Create an LLMCheckerChain from a language model.
Args:
llm: a language model
create_draft_answer_prompt: prompt to create a draft answer
list_assertions_prompt: prompt to list assertions
check_assertions_prompt: prompt to check assertions
revised_answer_prompt: prompt to revise the answer
**kwargs: additional arguments
"""
question_to_checked_assertions_chain = (
_load_question_to_checked_assertions_chain(
llm,
create_draft_answer_prompt,
list_assertions_prompt,
check_assertions_prompt,
revised_answer_prompt,
)
)
return cls(
question_to_checked_assertions_chain=question_to_checked_assertions_chain,
**kwargs,
)
| LLMCheckerChain |
python | openai__openai-python | src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py | {
"start": 2314,
"end": 3320
} | class ____(TypedDict, total=False):
type: Required[Literal["semantic_vad"]]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: bool
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Literal["low", "medium", "high", "auto"]
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeTranscriptionSessionAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad]
| SemanticVad |
python | getsentry__sentry | src/sentry/hybridcloud/services/organizationmember_mapping/impl.py | {
"start": 818,
"end": 4369
} | class ____(OrganizationMemberMappingService):
def upsert_mapping(
self,
*,
organization_id: int,
organizationmember_id: int,
mapping: RpcOrganizationMemberMappingUpdate,
) -> RpcOrganizationMemberMapping:
def apply_update(orm_mapping: OrganizationMemberMapping) -> None:
adding_user = orm_mapping.user_id is None and mapping.user_id is not None
orm_mapping.role = mapping.role
orm_mapping.user_id = mapping.user_id
orm_mapping.email = mapping.email
orm_mapping.inviter_id = mapping.inviter_id
orm_mapping.invite_status = mapping.invite_status
orm_mapping.organizationmember_id = organizationmember_id
orm_mapping.save()
if adding_user:
try:
user = orm_mapping.user
except User.DoesNotExist:
return
if user is not None:
for outbox in user.outboxes_for_update():
outbox.save()
orm_mapping: OrganizationMemberMapping = OrganizationMemberMapping(
organization_id=organization_id
)
try:
with outbox_context(
transaction.atomic(using=router.db_for_write(OrganizationMemberMapping))
):
orm_mapping = (
self._find_organization_member(
organization_id=organization_id,
organizationmember_id=organizationmember_id,
)
or orm_mapping
)
apply_update(orm_mapping)
return serialize_org_member_mapping(orm_mapping)
except IntegrityError as e:
# Stale user id, which will happen if a cascading deletion on the user has not reached the region.
# This is "safe" since the upsert here should be a no-op.
if "fk_auth_user" in str(e):
if "inviter_id" in str(e):
mapping.inviter_id = None
else:
mapping.user_id = None
else:
existing = self._find_organization_member(
organization_id=organization_id,
organizationmember_id=organizationmember_id,
)
if existing is None:
raise
else:
orm_mapping = existing
with outbox_context(
transaction.atomic(using=router.db_for_write(OrganizationMemberMapping))
):
apply_update(orm_mapping)
return serialize_org_member_mapping(orm_mapping)
def _find_organization_member(
self,
organization_id: int,
organizationmember_id: int,
) -> OrganizationMemberMapping | None:
return OrganizationMemberMapping.objects.filter(
organization_id=organization_id, organizationmember_id=organizationmember_id
).first()
def delete(
self,
*,
organization_id: int,
organizationmember_id: int,
) -> None:
org_member_map = self._find_organization_member(
organization_id=organization_id,
organizationmember_id=organizationmember_id,
)
if org_member_map:
with unguarded_write(using=router.db_for_write(OrganizationMemberMapping)):
org_member_map.delete()
| DatabaseBackedOrganizationMemberMappingService |
python | kamyu104__LeetCode-Solutions | Python/find-all-good-indices.py | {
"start": 42,
"end": 592
} | class ____(object):
def goodIndices(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
left = [1]*len(nums)
for i in xrange(1, len(nums)-1):
if nums[i] <= nums[i-1]:
left[i] = left[i-1]+1
right = [1]*len(nums)
for i in reversed(xrange(1, len(nums)-1)):
if nums[i] <= nums[i+1]:
right[i] = right[i+1]+1
return [i for i in xrange(k, len(nums)-k) if min(left[i-1], right[i+1]) >= k]
| Solution |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py | {
"start": 85128,
"end": 90251
} | class ____:
edges = [
(1, 3),
(3, 2),
(3, 4),
(4, 9),
(4, 5),
(3, 9),
(5, 8),
(5, 7),
(8, 7),
(7, 6),
]
mapped = {
0: "x",
1: "a",
2: "b",
3: "c",
4: "d",
5: "e",
6: "f",
7: "g",
8: "h",
9: "i",
}
G1 = nx.DiGraph(edges)
G1.add_node(0)
G2 = nx.relabel_nodes(G1, mapping=mapped)
def test_updating(self):
G2_degree = {
n: (in_degree, out_degree)
for (n, in_degree), (_, out_degree) in zip(
self.G2.in_degree, self.G2.out_degree
)
}
gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree)
m, m_rev, T1_out, T1_in, T1_tilde, _, T2_out, T2_in, T2_tilde, _ = sparams
# Add node to the mapping
m[4] = self.mapped[4]
m_rev[self.mapped[4]] = 4
_update_Tinout(4, self.mapped[4], gparams, sparams)
assert T1_out == {5, 9}
assert T1_in == {3}
assert T2_out == {"i", "e"}
assert T2_in == {"c"}
assert T1_tilde == {0, 1, 2, 6, 7, 8}
assert T2_tilde == {"x", "a", "b", "f", "g", "h"}
# Add node to the mapping
m[5] = self.mapped[5]
m_rev[self.mapped[5]] = 5
_update_Tinout(5, self.mapped[5], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3}
assert T2_out == {"i", "g", "h"}
assert T2_in == {"c"}
assert T1_tilde == {0, 1, 2, 6}
assert T2_tilde == {"x", "a", "b", "f"}
# Add node to the mapping
m[6] = self.mapped[6]
m_rev[self.mapped[6]] = 6
_update_Tinout(6, self.mapped[6], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3, 7}
assert T2_out == {"i", "g", "h"}
assert T2_in == {"c", "g"}
assert T1_tilde == {0, 1, 2}
assert T2_tilde == {"x", "a", "b"}
# Add node to the mapping
m[3] = self.mapped[3]
m_rev[self.mapped[3]] = 3
_update_Tinout(3, self.mapped[3], gparams, sparams)
assert T1_out == {9, 8, 7, 2}
assert T1_in == {7, 1}
assert T2_out == {"i", "g", "h", "b"}
assert T2_in == {"g", "a"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Add node to the mapping
m[0] = self.mapped[0]
m_rev[self.mapped[0]] = 0
_update_Tinout(0, self.mapped[0], gparams, sparams)
assert T1_out == {9, 8, 7, 2}
assert T1_in == {7, 1}
assert T2_out == {"i", "g", "h", "b"}
assert T2_in == {"g", "a"}
assert T1_tilde == set()
assert T2_tilde == set()
def test_restoring(self):
m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"}
m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6}
T1_out = {2, 7, 9, 8}
T1_in = {1, 7}
T2_out = {"b", "g", "i", "h"}
T2_in = {"a", "g"}
T1_tilde = set()
T2_tilde = set()
gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {})
sparams = _StateParameters(
m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None
)
# Remove a node from the mapping
m.pop(0)
m_rev.pop("x")
_restore_Tinout_Di(0, self.mapped[0], gparams, sparams)
assert T1_out == {2, 7, 9, 8}
assert T1_in == {1, 7}
assert T2_out == {"b", "g", "i", "h"}
assert T2_in == {"a", "g"}
assert T1_tilde == {0}
assert T2_tilde == {"x"}
# Remove a node from the mapping
m.pop(6)
m_rev.pop("f")
_restore_Tinout_Di(6, self.mapped[6], gparams, sparams)
assert T1_out == {2, 9, 8, 7}
assert T1_in == {1}
assert T2_out == {"b", "i", "h", "g"}
assert T2_in == {"a"}
assert T1_tilde == {0, 6}
assert T2_tilde == {"x", "f"}
# Remove a node from the mapping
m.pop(3)
m_rev.pop("c")
_restore_Tinout_Di(3, self.mapped[3], gparams, sparams)
assert T1_out == {9, 8, 7}
assert T1_in == {3}
assert T2_out == {"i", "h", "g"}
assert T2_in == {"c"}
assert T1_tilde == {0, 6, 1, 2}
assert T2_tilde == {"x", "f", "a", "b"}
# Remove a node from the mapping
m.pop(5)
m_rev.pop("e")
_restore_Tinout_Di(5, self.mapped[5], gparams, sparams)
assert T1_out == {9, 5}
assert T1_in == {3}
assert T2_out == {"i", "e"}
assert T2_in == {"c"}
assert T1_tilde == {0, 6, 1, 2, 8, 7}
assert T2_tilde == {"x", "f", "a", "b", "h", "g"}
# Remove a node from the mapping
m.pop(4)
m_rev.pop("d")
_restore_Tinout_Di(4, self.mapped[4], gparams, sparams)
assert T1_out == set()
assert T1_in == set()
assert T2_out == set()
assert T2_in == set()
assert T1_tilde == set(self.G1.nodes())
assert T2_tilde == set(self.G2.nodes())
| TestDiGraphTinoutUpdating |
python | wireservice__csvkit | csvkit/grep.py | {
"start": 78,
"end": 4344
} | class ____:
r"""
Given any row iterator, only return rows which pass the filter.
If 'header' is False, then all rows must pass the filter; by default, the first row will be passed
through untested.
The value of patterns may be either a sequence or a dictionary. Items in the sequence and values in the
dictionary may be strings, regular expressions, or functions. For each row in the wrapped iterator,
these values will be used as tests, and the row will only be yielded by the filter if all values pass
their corresponding tests. This behavior can be toggled so that all rows which pass any of the tests
will be yielded by specifying "any_match=True" in the constructor.
Empty values (the blank string or None) not be tested; the value in that position will not affect whether
or not the filtering reader yields a prospective row. To test for explicitly blank, use a regular
expression such as "^$" or "^\s*$"
If patterns is a dictionary, the keys can be integers identifying indices in the input rows, or, if 'header'
is True (as it is by default), they can be strings matching column names in the first row of the reader.
If patterns is a sequence, then it is assumed that they will be applied to the
equivalently positioned values in the test rows.
By specifying 'inverse=True', only rows which do not match the patterns will be passed by the filter. The header,
if there is one, will always be returned regardless of the value for 'inverse'.
"""
returned_header = False
column_names = None
def __init__(self, reader, patterns, header=True, any_match=False, inverse=False):
super().__init__()
self.reader = reader
self.header = header
if self.header:
self.column_names = next(reader)
self.any_match = any_match
self.inverse = inverse
self.patterns = standardize_patterns(self.column_names, patterns)
def __iter__(self):
return self
def __next__(self):
if self.column_names and not self.returned_header:
self.returned_header = True
return self.column_names
while True:
row = next(self.reader)
if self.test_row(row):
return row
raise StopIteration()
def test_row(self, row):
for idx, test in self.patterns.items():
try:
value = row[idx]
except IndexError:
value = ''
result = test(value)
if self.any_match:
if result:
return not self.inverse # True
else:
if not result:
return self.inverse # False
if self.any_match:
return self.inverse # False
return not self.inverse # True
def standardize_patterns(column_names, patterns):
"""
Given patterns in any of the permitted input forms, return a dict whose keys
are column indices and whose values are functions which return a boolean value whether the value passes.
If patterns is a dictionary and any of its keys are values in column_names, the returned dictionary will
have those keys replaced with the integer position of that value in column_names
"""
try:
# Dictionary of patterns
patterns = {k: pattern_as_function(v) for k, v in patterns.items() if v}
if not column_names:
return patterns
p2 = {}
for k in patterns:
if k in column_names:
idx = column_names.index(k)
if idx in patterns:
raise ColumnIdentifierError("Column %s has index %i which already has a pattern." % (k, idx))
p2[idx] = patterns[k]
else:
p2[k] = patterns[k]
return p2
except AttributeError:
# Sequence of patterns
return {i: pattern_as_function(x) for i, x in enumerate(patterns)}
def pattern_as_function(obj):
# obj is function
if callable(obj):
return obj
# obj is regex object
if hasattr(obj, 'match'):
return regex_callable(obj)
# obj is string
return lambda x: obj in x
| FilteringCSVReader |
python | getsentry__sentry | src/sentry/api/endpoints/source_map_debug_blue_thunder_edition.py | {
"start": 3167,
"end": 3370
} | class ____(TypedDict):
debug_id_process: SourceMapDebugIdProcessResult
release_process: SourceMapReleaseProcessResult | None
scraping_process: SourceMapScrapingProcessResult
| SourceMapDebugFrame |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 1555,
"end": 1652
} | class ____(ClassF[T]):
def __init__(self, val: T) -> None:
super().__init__(val)
| ClassG |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 13414,
"end": 21415
} | class ____(UnitaryOperator):
"""Wigner D operator in terms of Euler angles.
Defines the rotation operator in terms of the Euler angles defined by
the z-y-z convention for a passive transformation. That is the coordinate
axes are rotated first about the z-axis, giving the new x'-y'-z' axes. Then
this new coordinate system is rotated about the new y'-axis, giving new
x''-y''-z'' axes. Then this new coordinate system is rotated about the
z''-axis. Conventions follow those laid out in [1]_.
Parameters
==========
alpha : Number, Symbol
First Euler Angle
beta : Number, Symbol
Second Euler angle
gamma : Number, Symbol
Third Euler angle
Examples
========
A simple example rotation operator:
>>> from sympy import pi
>>> from sympy.physics.quantum.spin import Rotation
>>> Rotation(pi, 0, pi/2)
R(pi,0,pi/2)
With symbolic Euler angles and calculating the inverse rotation operator:
>>> from sympy import symbols
>>> a, b, c = symbols('a b c')
>>> Rotation(a, b, c)
R(a,b,c)
>>> Rotation(a, b, c).inverse()
R(-c,-b,-a)
See Also
========
WignerD: Symbolic Wigner-D function
D: Wigner-D function
d: Wigner small-d function
References
==========
.. [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
@classmethod
def _eval_args(cls, args):
args = QExpr._eval_args(args)
if len(args) != 3:
raise ValueError('3 Euler angles required, got: %r' % args)
return args
@classmethod
def _eval_hilbert_space(cls, label):
# We consider all j values so our space is infinite.
return ComplexSpace(S.Infinity)
@property
def alpha(self):
return self.label[0]
@property
def beta(self):
return self.label[1]
@property
def gamma(self):
return self.label[2]
def _print_operator_name(self, printer, *args):
return 'R'
def _print_operator_name_pretty(self, printer, *args):
if printer._use_unicode:
return prettyForm('\N{SCRIPT CAPITAL R}' + ' ')
else:
return prettyForm("R ")
def _print_operator_name_latex(self, printer, *args):
return r'\mathcal{R}'
def _eval_inverse(self):
return Rotation(-self.gamma, -self.beta, -self.alpha)
@classmethod
def D(cls, j, m, mp, alpha, beta, gamma):
"""Wigner D-function.
Returns an instance of the WignerD class corresponding to the Wigner-D
function specified by the parameters.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
alpha : Number, Symbol
First Euler angle of rotation
beta : Number, Symbol
Second Euler angle of rotation
gamma : Number, Symbol
Third Euler angle of rotation
Examples
========
Return the Wigner-D matrix element for a defined rotation, both
numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> alpha, beta, gamma = symbols('alpha beta gamma')
>>> Rotation.D(1, 1, 0,pi, pi/2,-pi)
WignerD(1, 1, 0, pi, pi/2, -pi)
See Also
========
WignerD: Symbolic Wigner-D function
"""
return WignerD(j, m, mp, alpha, beta, gamma)
@classmethod
def d(cls, j, m, mp, beta):
"""Wigner small-d function.
Returns an instance of the WignerD class corresponding to the Wigner-D
function specified by the parameters with the alpha and gamma angles
given as 0.
Parameters
===========
j : Number
Total angular momentum
m : Number
Eigenvalue of angular momentum along axis after rotation
mp : Number
Eigenvalue of angular momentum along rotated axis
beta : Number, Symbol
Second Euler angle of rotation
Examples
========
Return the Wigner-D matrix element for a defined rotation, both
numerical and symbolic:
>>> from sympy.physics.quantum.spin import Rotation
>>> from sympy import pi, symbols
>>> beta = symbols('beta')
>>> Rotation.d(1, 1, 0, pi/2)
WignerD(1, 1, 0, 0, pi/2, 0)
See Also
========
WignerD: Symbolic Wigner-D function
"""
return WignerD(j, m, mp, 0, beta, 0)
def matrix_element(self, j, m, jp, mp):
result = self.__class__.D(
jp, m, mp, self.alpha, self.beta, self.gamma
)
result *= KroneckerDelta(j, jp)
return result
def _represent_base(self, basis, **options):
j = sympify(options.get('j', S.Half))
# TODO: move evaluation up to represent function/implement elsewhere
evaluate = sympify(options.get('doit'))
size, mvals = m_values(j)
result = zeros(size, size)
for p in range(size):
for q in range(size):
me = self.matrix_element(j, mvals[p], j, mvals[q])
if evaluate:
result[p, q] = me.doit()
else:
result[p, q] = me
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _apply_operator_uncoupled(self, state, ket, *, dummy=True, **options):
a = self.alpha
b = self.beta
g = self.gamma
j = ket.j
m = ket.m
if j.is_number:
s = []
size = m_values(j)
sz = size[1]
for mp in sz:
r = Rotation.D(j, m, mp, a, b, g)
z = r.doit()
s.append(z*state(j, mp))
return Add(*s)
else:
if dummy:
mp = Dummy('mp')
else:
mp = symbols('mp')
return Sum(Rotation.D(j, m, mp, a, b, g)*state(j, mp), (mp, -j, j))
def _apply_operator_JxKet(self, ket, **options):
return self._apply_operator_uncoupled(JxKet, ket, **options)
def _apply_operator_JyKet(self, ket, **options):
return self._apply_operator_uncoupled(JyKet, ket, **options)
def _apply_operator_JzKet(self, ket, **options):
return self._apply_operator_uncoupled(JzKet, ket, **options)
def _apply_operator_coupled(self, state, ket, *, dummy=True, **options):
a = self.alpha
b = self.beta
g = self.gamma
j = ket.j
m = ket.m
jn = ket.jn
coupling = ket.coupling
if j.is_number:
s = []
size = m_values(j)
sz = size[1]
for mp in sz:
r = Rotation.D(j, m, mp, a, b, g)
z = r.doit()
s.append(z*state(j, mp, jn, coupling))
return Add(*s)
else:
if dummy:
mp = Dummy('mp')
else:
mp = symbols('mp')
return Sum(Rotation.D(j, m, mp, a, b, g)*state(
j, mp, jn, coupling), (mp, -j, j))
def _apply_operator_JxKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JxKetCoupled, ket, **options)
def _apply_operator_JyKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JyKetCoupled, ket, **options)
def _apply_operator_JzKetCoupled(self, ket, **options):
return self._apply_operator_coupled(JzKetCoupled, ket, **options)
| Rotation |
python | doocs__leetcode | solution/0900-0999/0952.Largest Component Size by Common Factor/Solution.py | {
"start": 320,
"end": 698
} | class ____:
def largestComponentSize(self, nums: List[int]) -> int:
uf = UnionFind(max(nums) + 1)
for v in nums:
i = 2
while i <= v // i:
if v % i == 0:
uf.union(v, i)
uf.union(v, v // i)
i += 1
return max(Counter(uf.find(v) for v in nums).values())
| Solution |
python | django__django | tests/serializers/models/data.py | {
"start": 4634,
"end": 4729
} | class ____(models.Model):
data = models.CharField(max_length=30, primary_key=True)
| CharPKData |
python | protocolbuffers__protobuf | upb/cmake/staleness_test_lib.py | {
"start": 2111,
"end": 6136
} | class ____(object):
"""Represents the configuration for a single staleness test target."""
def __init__(self, file_list):
# Duplicate to avoid modifying our arguments.
file_list = list(file_list)
# The file list contains a few other bits of information at the end.
# This is packed by the code in build_defs.bzl.
self.target_name = file_list.pop()
self.package_name = file_list.pop()
self.pattern = file_list.pop()
self.file_list = file_list
def _GetFilePairs(config):
"""Generates the list of file pairs.
Args:
config: a Config object representing this target's config.
Returns:
A list of _FilePair objects.
"""
ret = []
has_bazel_genfiles = os.path.exists("bazel-bin")
for filename in config.file_list:
target = os.path.join(config.package_name, filename)
generated = os.path.join(config.package_name, config.pattern % filename)
if has_bazel_genfiles:
generated = os.path.join("bazel-bin", generated)
# Generated files should always exist. Blaze should guarantee this before
# we are run.
if not os.path.isfile(generated):
print("Generated file '%s' does not exist." % generated)
print("Please run this command to generate it:")
print(" bazel build %s:%s" % (config.package_name, config.target_name))
sys.exit(1)
ret.append(_FilePair(target, generated))
return ret
def _GetMissingAndStaleFiles(file_pairs):
"""Generates lists of missing and stale files.
Args:
file_pairs: a list of _FilePair objects.
Returns:
missing_files: a list of _FilePair objects representing missing files.
These target files do not exist at all.
stale_files: a list of _FilePair objects representing stale files.
These target files exist but have stale contents.
"""
missing_files = []
stale_files = []
for pair in file_pairs:
if not os.path.isfile(pair.target):
missing_files.append(pair)
continue
with open(pair.generated) as g, open(pair.target) as t:
if g.read() != t.read():
stale_files.append(pair)
return missing_files, stale_files
def _CopyFiles(file_pairs):
"""Copies all generated files to the corresponding target file.
The target files must be writable already.
Args:
file_pairs: a list of _FilePair objects that we want to copy.
"""
for pair in file_pairs:
target_dir = os.path.dirname(pair.target)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
copyfile(pair.generated, pair.target)
def FixFiles(config):
"""Implements the --fix option: overwrites missing or out-of-date files.
Args:
config: the Config object for this test.
"""
file_pairs = _GetFilePairs(config)
missing_files, stale_files = _GetMissingAndStaleFiles(file_pairs)
_CopyFiles(stale_files + missing_files)
def CheckFilesMatch(config):
"""Checks whether each target file matches the corresponding generated file.
Args:
config: the Config object for this test.
Returns:
None if everything matches, otherwise a string error message.
"""
diff_errors = []
file_pairs = _GetFilePairs(config)
missing_files, stale_files = _GetMissingAndStaleFiles(file_pairs)
for pair in missing_files:
diff_errors.append("File %s does not exist" % pair.target)
continue
for pair in stale_files:
with open(pair.generated) as g, open(pair.target) as t:
diff = ''.join(difflib.unified_diff(g.read().splitlines(keepends=True),
t.read().splitlines(keepends=True)))
diff_errors.append("File %s is out of date:\n%s" % (pair.target, diff))
if diff_errors:
error_msg = "Files out of date!\n\n"
error_msg += "To fix run THIS command:\n"
error_msg += " bazel-bin/%s/%s --fix\n\n" % (config.package_name,
config.target_name)
error_msg += "Errors:\n"
error_msg += " " + "\n ".join(diff_errors)
return error_msg
else:
return None
| Config |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_to_be_of_type.py | {
"start": 1684,
"end": 7528
} | class ____(ColumnMapExpectation):
"""Expect values in a column to belong to one of the specified geometry types.
Args:
column (str): \
The column name.
geom_types_list (str): \
List of shapely geometry types to match against. \
e.g: Point, Polygon, LineString, MultiPoint, MultiPolygon, MultiLineString
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"point_and_polygon": [
mapping(Point(1, 2)),
mapping(Polygon([(1, 2), (3, 4), (5, 6)])),
],
"multipolygon_and_linestring": [
mapping(LineString([(0, 0), (1, 1), (1, -1)])),
mapping(MultiPolygon([Polygon([(1, 2), (3, 4), (5, 6)])])),
],
},
"tests": [
{
"title": "positive_for_point_and_polygon",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "point_and_polygon",
"geom_types_list": ["Point", "Polygon"],
"mostly": 1,
},
"out": {"success": True},
},
{
"title": "negative_for_point_and_polygon",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "point_and_polygon",
"geom_types_list": ["Point", "MultiPolygon"],
"mostly": 1.0,
},
"out": {"success": False},
},
{
"title": "positive_for_multipolygon_and_linestring",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "multipolygon_and_linestring",
"geom_types_list": ["Point", "MultiPolygon", "LineString"],
"mostly": 1,
},
"out": {"success": True},
},
{
"title": "negative_for_multipolygon_and_linestring",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "multipolygon_and_linestring",
"geom_types_list": ["Point", "Polygon"],
"mostly": 0.2,
},
"out": {"success": False},
},
],
},
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.geometry_of_type"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"geom_types_list",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1,
}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-2022",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@mmi333", # Don't forget to add your github handle here!
],
"requirements": ["shapely"],
}
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: dict = None,
**kwargs,
) -> List[
Union[
dict,
str,
RenderedStringTemplateContent,
RenderedTableContent,
RenderedBulletListContent,
RenderedGraphContent,
Any,
]
]:
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"geom_types_list",
"mostly",
],
)
template_str = "values must belong to one of the following geometry types: $geom_types_list"
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
if __name__ == "__main__":
ExpectColumnValuesGeometryToBeOfType().print_diagnostic_checklist()
| ExpectColumnValuesGeometryToBeOfType |
python | PrefectHQ__prefect | src/prefect/utilities/pydantic.py | {
"start": 6889,
"end": 13132
} | class ____(Generic[M]):
"""
A utility for creating a Pydantic model in several steps.
Fields may be set at initialization, via attribute assignment, or at finalization
when the concrete model is returned.
Pydantic validation does not occur until finalization.
Each field can only be set once and a `ValueError` will be raised on assignment if
a field already has a value.
Example:
```python
class MyModel(BaseModel):
x: int
y: str
z: float
partial_model = PartialModel(MyModel, x=1)
partial_model.y = "two"
model = partial_model.finalize(z=3.0)
```
"""
def __init__(self, __model_cls: type[M], **kwargs: Any) -> None:
self.fields = kwargs
# Set fields first to avoid issues if `fields` is also set on the `model_cls`
# in our custom `setattr` implementation.
self.model_cls = __model_cls
for name in kwargs.keys():
self.raise_if_not_in_model(name)
def finalize(self, **kwargs: Any) -> M:
for name in kwargs.keys():
self.raise_if_already_set(name)
self.raise_if_not_in_model(name)
return self.model_cls(**self.fields, **kwargs)
def raise_if_already_set(self, name: str) -> None:
if name in self.fields:
raise ValueError(f"Field {name!r} has already been set.")
def raise_if_not_in_model(self, name: str) -> None:
if name not in self.model_cls.model_fields:
raise ValueError(f"Field {name!r} is not present in the model.")
def __setattr__(self, __name: str, __value: Any) -> None:
if __name in {"fields", "model_cls"}:
return super().__setattr__(__name, __value)
self.raise_if_already_set(__name)
self.raise_if_not_in_model(__name)
self.fields[__name] = __value
def __repr__(self) -> str:
dsp_fields = ", ".join(
f"{key}={repr(value)}" for key, value in self.fields.items()
)
return f"PartialModel(cls={self.model_cls.__name__}, {dsp_fields})"
def custom_pydantic_encoder(
type_encoders: dict[Any, Callable[[type[Any]], Any]], obj: Any
) -> Any:
# Check the class type and its superclasses for a matching encoder
for base in obj.__class__.__mro__[:-1]:
try:
encoder = type_encoders[base]
except KeyError:
continue
return encoder(obj)
else: # We have exited the for loop without finding a suitable encoder
if isinstance(obj, BaseModel):
return obj.model_dump(mode="json")
else:
return to_jsonable_python(obj)
def parse_obj_as(
type_: type[T],
data: Any,
mode: Literal["python", "json", "strings"] = "python",
) -> T:
"""Parse a given data structure as a Pydantic model via `TypeAdapter`.
Read more about `TypeAdapter` [here](https://docs.pydantic.dev/latest/concepts/type_adapter/).
Args:
type_: The type to parse the data as.
data: The data to be parsed.
mode: The mode to use for parsing, either `python`, `json`, or `strings`.
Defaults to `python`, where `data` should be a Python object (e.g. `dict`).
Returns:
The parsed `data` as the given `type_`.
Example:
Basic Usage of `parse_as`
```python
from prefect.utilities.pydantic import parse_as
from pydantic import BaseModel
class ExampleModel(BaseModel):
name: str
# parsing python objects
parsed = parse_as(ExampleModel, {"name": "Marvin"})
assert isinstance(parsed, ExampleModel)
assert parsed.name == "Marvin"
# parsing json strings
parsed = parse_as(
list[ExampleModel],
'[{"name": "Marvin"}, {"name": "Arthur"}]',
mode="json"
)
assert all(isinstance(item, ExampleModel) for item in parsed)
assert parsed[0].name == "Marvin"
assert parsed[1].name == "Arthur"
# parsing raw strings
parsed = parse_as(int, '123', mode="strings")
assert isinstance(parsed, int)
assert parsed == 123
```
"""
adapter = TypeAdapter(type_)
origin: Optional[Any] = get_origin(type_)
if origin is list and isinstance(data, dict):
values_dict: dict[Any, Any] = data
data = next(iter(values_dict.values()))
parser: Callable[[Any], T] = getattr(adapter, f"validate_{mode}")
return parser(data)
def handle_secret_render(value: object, context: dict[str, Any]) -> object:
if hasattr(value, "get_secret_value"):
return (
cast(Secret[object], value).get_secret_value()
if context.get("include_secrets", False)
else obfuscate(value)
)
elif isinstance(value, BaseModel):
# Pass the serialization mode if available in context
mode = context.get("serialization_mode", "python")
if mode == "json":
# For JSON mode with nested models, we need to recursively process fields
# because regular Pydantic models don't understand include_secrets
json_data = value.model_dump(mode="json")
for field_name in type(value).model_fields:
field_value = getattr(value, field_name)
json_data[field_name] = visit_collection(
expr=field_value,
visit_fn=partial(handle_secret_render, context=context),
return_data=True,
)
return json_data
else:
return value.model_dump(context=context)
return value
def __getattr__(name: str) -> Any:
"""
Handles imports from this module that are deprecated.
"""
if name == "JsonPatch":
warnings.warn(
"JsonPatch is deprecated and will be removed after March 2025. "
"Please use `JsonPatch` from the `jsonpatch` package instead.",
DeprecationWarning,
stacklevel=2,
)
from ._deprecated import JsonPatch
return JsonPatch
else:
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
| PartialModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-tplcentral/source_tplcentral/streams.py | {
"start": 6735,
"end": 8163
} | class ____(IncrementalTplcentralStream):
# https://api.3plcentral.com/rels/inventory/inventory
upstream_primary_key = "ReceiveItemId"
upstream_cursor_field = "ReceivedDate"
collection_field = "ResourceList"
page_size = 1000
def path(self, **kwargs) -> str:
return "inventory"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = super().request_params(
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
params.update(
{
"sort": self.upstream_cursor_field,
"rql": ";".join(
[
f"CustomerIdentifier.Id=={self.customer_id}",
f"FacilityIdentifier.Id=={self.facility_id}",
]
),
}
)
cursor = stream_slice.get(self.cursor_field)
if cursor:
params.update(
{
"rql": ";".join(
[
params["rql"],
f"{self.upstream_cursor_field}=ge={cursor}",
]
)
}
)
return params
| Inventory |
python | kamyu104__LeetCode-Solutions | Python/maximum-energy-boost-from-two-drinks.py | {
"start": 34,
"end": 410
} | class ____(object):
def maxEnergyBoost(self, energyDrinkA, energyDrinkB):
"""
:type energyDrinkA: List[int]
:type energyDrinkB: List[int]
:rtype: int
"""
dp = [0]*2
for i in xrange(len(energyDrinkA)):
dp = [max(dp[0]+energyDrinkA[i], dp[1]), max(dp[1]+energyDrinkB[i], dp[0])]
return max(dp)
| Solution |
python | sympy__sympy | sympy/integrals/transforms.py | {
"start": 38727,
"end": 39589
} | class ____(IntegralTransform):
"""
Base class for sine and cosine transforms.
Specify cls._kern.
"""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _sine_cosine_transform(f, x, k,
self.a(), self.b(),
self.__class__._kern,
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
K = self.__class__._kern
return Integral(a*f*K(b*x*k), (x, S.Zero, S.Infinity))
| SineCosineTypeTransform |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 3052,
"end": 3673
} | class ____(SortingAlgorithmTestCase):
"""
Tests Quick sort in place version on a small range from 0-9
also tests partition function included in quick sort
"""
def test_quicksort_in_place(self):
self.output = quick_sort_in_place.sort(
self.input, 0,
len(self.input)-1
)
self.assertEqual(self.correct, self.output)
def test_partition(self):
self.seq = list(range(10))
self.assertIs(
quick_sort_in_place.partition(
self.seq, 0,
len(self.seq)-1, 5),
5
)
| TestQuickSortInPlace |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 5364,
"end": 5811
} | class ____(CtrlNode):
"""Removes anomalous spikes from data, replacing with nearby values"""
nodeName = 'DenoiseFilter'
uiTemplate = [
('radius', 'intSpin', {'value': 2, 'min': 0, 'max': 1000000}),
('threshold', 'doubleSpin', {'value': 4.0, 'min': 0, 'max': 1000})
]
def processData(self, data):
#print "DENOISE"
s = self.stateGroup.state()
return functions.denoise(data, **s)
| Denoise |
python | streamlit__streamlit | lib/streamlit/elements/widgets/button.py | {
"start": 3191,
"end": 54744
} | class ____:
@gather_metrics("button")
def button(
self,
label: str,
key: Key | None = None,
help: str | None = None,
on_click: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> bool:
r"""Display a button widget.
Parameters
----------
label : str
A short label explaining to the user what this button is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed when the button is hovered over. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_click : callable
An optional callback invoked when this button is clicked.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
type : "primary", "secondary", or "tertiary"
An optional string that specifies the button type. This can be one
of the following:
- ``"primary"``: The button's background is the app's primary color
for additional emphasis.
- ``"secondary"`` (default): The button's background coordinates
with the app's background color for normal emphasis.
- ``"tertiary"``: The button is plain text without a border or
background for subtlety.
icon : str or None
An optional emoji or icon to display next to the button label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
disabled : bool
An optional boolean that disables the button if set to ``True``.
The default is ``False``.
use_container_width : bool
Whether to expand the button's width to fill its parent container.
If ``use_container_width`` is ``False`` (default), Streamlit sizes
the button to fit its contents. If ``use_container_width`` is
``True``, the width of the button matches its parent container.
In both cases, if the contents of the button are wider than the
parent container, the contents will line wrap.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
shortcut : str or None
An optional keyboard shortcut that triggers the button. Provide a
single alphanumeric key (e.g. ``"K"``, ``"4"``), a function key
(e.g. ``"F11"``), or a supported special key (e.g. ``"Enter"``,
``"Esc"``), optionally combined with modifiers.
Examples: ``"Ctrl+K"``, ``"Cmd+Shift+O"``, ``"Mod+Enter"``.
.. note::
The keys ``"C"`` and ``"R"`` are reserved and cannot be used,
even with modifiers. ``"Ctrl"``, ``"Cmd"``, and ``"Mod"`` are
platform-dependent: they map to ``"Command"`` (⌘) on macOS and
``"Control"`` on Windows/Linux. Punctuation keys (e.g. ``"."``,
``","``) are not currently supported.
Returns
-------
bool
True if the button was clicked on the last run of the app,
False otherwise.
Examples
--------
**Example 1: Customize your button type**
>>> import streamlit as st
>>>
>>> st.button("Reset", type="primary")
>>> if st.button("Say hello"):
... st.write("Why hello there")
... else:
... st.write("Goodbye")
>>>
>>> if st.button("Aloha", type="tertiary"):
... st.write("Ciao")
.. output::
https://doc-buton.streamlit.app/
height: 300px
**Example 2: Add icons to your button**
Although you can add icons to your buttons through Markdown, the
``icon`` parameter is a convenient and consistent alternative.
>>> import streamlit as st
>>>
>>> left, middle, right = st.columns(3)
>>> if left.button("Plain button", width="stretch"):
... left.markdown("You clicked the plain button.")
>>> if middle.button("Emoji button", icon="😃", width="stretch"):
... middle.markdown("You clicked the emoji button.")
>>> if right.button("Material button", icon=":material/mood:", width="stretch"):
... right.markdown("You clicked the Material button.")
.. output::
https://doc-button-icons.streamlit.app/
height: 220px
"""
key = to_key(key)
ctx = get_script_run_ctx()
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
# Checks whether the entered button type is one of the allowed options
if type not in ["primary", "secondary", "tertiary"]:
raise StreamlitAPIException(
'The type argument to st.button must be "primary", "secondary", or "tertiary". '
f'\nThe argument passed was "{type}".'
)
return self.dg._button(
label,
key,
help,
is_form_submitter=False,
on_click=on_click,
args=args,
kwargs=kwargs,
disabled=disabled,
type=type,
icon=icon,
ctx=ctx,
width=width,
shortcut=shortcut,
)
@gather_metrics("download_button")
def download_button(
self,
label: str,
data: DownloadButtonDataType,
file_name: str | None = None,
mime: str | None = None,
key: Key | None = None,
help: str | None = None,
on_click: WidgetCallback | Literal["rerun", "ignore"] | None = "rerun",
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> bool:
r"""Display a download button widget.
This is useful when you would like to provide a way for your users
to download a file directly from your app.
Note that the data to be downloaded is stored in-memory while the
user is connected, so it's a good idea to keep file sizes under a
couple hundred megabytes to conserve memory.
If you want to prevent your app from rerunning when a user clicks the
download button, wrap the download button in a `fragment
<https://docs.streamlit.io/develop/concepts/architecture/fragments>`_.
Parameters
----------
label : str
A short label explaining to the user what this button is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
data : str, bytes, file, or callable
The contents of the file to be downloaded.
You can also pass a ``callable`` (no-arg function) that returns
``str``, ``bytes``, or a file-like object. The callable is executed
when the user clicks the download button (deferred generation).
Streamlit commands inside the callable (for example,
``st.write("Deferred data prepared")``) are ignored and will not
render.
To prevent unnecessary recomputation, use caching when converting
your data for download. For more information, see the Example 1
below.
file_name: str
An optional string to use as the name of the file to be downloaded,
such as ``"my_file.csv"``. If not specified, the name will be
automatically generated.
mime : str or None
The MIME type of the data. If this is ``None`` (default), Streamlit
sets the MIME type depending on the value of ``data`` as follows:
- If ``data`` is a string or textual file (i.e. ``str`` or
``io.TextIOWrapper`` object), Streamlit uses the "text/plain"
MIME type.
- If ``data`` is a binary file or bytes (i.e. ``bytes``,
``io.BytesIO``, ``io.BufferedReader``, or ``io.RawIOBase``
object), Streamlit uses the "application/octet-stream" MIME type.
For more information about MIME types, see
https://www.iana.org/assignments/media-types/media-types.xhtml.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed when the button is hovered over. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_click : callable, "rerun", "ignore", or None
How the button should respond to user interaction. This controls
whether or not the button triggers a rerun and if a callback
function is called. This can be one of the following values:
- ``"rerun"`` (default): The user downloads the file and the app
reruns. No callback function is called.
- ``"ignore"``: The user downloads the file and the app doesn't
rerun. No callback function is called.
- A ``callable``: The user downloads the file and app reruns. The
callable is called before the rest of the app.
- ``None``: This is same as ``on_click="rerun"``. This value exists
for backwards compatibility and shouldn't be used.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
type : "primary", "secondary", or "tertiary"
An optional string that specifies the button type. This can be one
of the following:
- ``"primary"``: The button's background is the app's primary color
for additional emphasis.
- ``"secondary"`` (default): The button's background coordinates
with the app's background color for normal emphasis.
- ``"tertiary"``: The button is plain text without a border or
background for subtlety.
icon : str or None
An optional emoji or icon to display next to the button label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
disabled : bool
An optional boolean that disables the download button if set to
``True``. The default is ``False``.
use_container_width : bool
Whether to expand the button's width to fill its parent container.
If ``use_container_width`` is ``False`` (default), Streamlit sizes
the button to fit its contents. If ``use_container_width`` is
``True``, the width of the button matches its parent container.
In both cases, if the contents of the button are wider than the
parent container, the contents will line wrap.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the download button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
shortcut : str or None
An optional keyboard shortcut that triggers the download button.
Provide a single alphanumeric key (e.g. ``"K"``, ``"4"``), a
function key (e.g. ``"F11"``), or a supported special key (e.g.
``"Enter"``, ``"Esc"``), optionally combined with modifiers.
Examples: ``"Ctrl+K"``, ``"Cmd+Shift+O"``, ``"Mod+Enter"``.
.. note::
The keys ``"C"`` and ``"R"`` are reserved and cannot be used,
even with modifiers. ``"Ctrl"``, ``"Cmd"``, and ``"Mod"`` are
platform-dependent: they map to ``"Command"`` (⌘) on macOS and
``"Control"`` on Windows/Linux. Punctuation keys (e.g. ``"."``,
``","``) are not currently supported.
Returns
-------
bool
True if the button was clicked on the last run of the app,
False otherwise.
Examples
--------
**Example 1: Download a dataframe as a CSV file**
When working with a large dataframe, it's recommended to fetch your
data with a cached function. When working with a download button, it's
similarly recommended to convert your data into a downloadable format
with a cached function. Caching ensures that the app reruns
efficiently.
>>> import streamlit as st
>>> import pandas as pd
>>> import numpy as np
>>>
>>> @st.cache_data
>>> def get_data():
>>> df = pd.DataFrame(
... np.random.randn(50, 20), columns=("col %d" % i for i in range(20))
... )
>>> return df
>>>
>>> @st.cache_data
>>> def convert_for_download(df):
>>> return df.to_csv().encode("utf-8")
>>>
>>> df = get_data()
>>> csv = convert_for_download(df)
>>>
>>> st.download_button(
... label="Download CSV",
... data=csv,
... file_name="data.csv",
... mime="text/csv",
... icon=":material/download:",
... )
.. output::
https://doc-download-button-csv.streamlit.app/
height: 200px
**Example 2: Download a string as a text file**
If you pass a string to the ``data`` argument, Streamlit will
automatically use the "text/plain" MIME type.
When you have a widget (like a text area) affecting the value of your
download, it's recommended to use another button to prepare the
download. In this case, use ``on_click="ignore"`` in your download
button to prevent the download button from rerunning your app. This
turns the download button into a frontend-only element that can be
nested in another button.
Without a preparation button, a user can type something into the text
area and immediately click the download button. Because a download is
initiated concurrently with the app rerun, this can create a race-like
condition where the user doesn't see the updated data in their
download.
.. important::
Even when you prevent your download button from triggering a rerun,
another widget with a pending change can still trigger a rerun. For
example, if a text area has a pending change when a user clicks a
download button, the text area will trigger a rerun.
>>> import streamlit as st
>>>
>>> message = st.text_area("Message", value="Lorem ipsum.\nStreamlit is cool.")
>>>
>>> if st.button("Prepare download"):
>>> st.download_button(
... label="Download text",
... data=message,
... file_name="message.txt",
... on_click="ignore",
... type="primary",
... icon=":material/download:",
... )
.. output::
https://doc-download-button-text.streamlit.app/
height: 250px
**Example 3: Download a file**
Use a context manager to open and read a local file on your Streamlit
server. Pass the ``io.BufferedReader`` object directly to ``data``.
Remember to specify the MIME type if you don't want the default
type of ``"application/octet-stream"`` for generic binary data. In the
example below, the MIME type is set to ``"image/png"`` for a PNG file.
>>> import streamlit as st
>>>
>>> with open("flower.png", "rb") as file:
... st.download_button(
... label="Download image",
... data=file,
... file_name="flower.png",
... mime="image/png",
... )
.. output::
https://doc-download-button-file.streamlit.app/
height: 200px
**Example 4: Generate the data on click with a callable**
Pass a function to ``data`` to generate the bytes lazily when the user
clicks the button. Streamlit commands inside this function are ignored.
>>> import streamlit as st
>>> import time
>>>
>>> def make_report():
>>> # Runs on click; Streamlit commands here won't render
>>> time.sleep(1)
>>> # st.write("Deferred data prepared") # Ignored
>>> return "col1,col2\n1,2\n3,4".encode("utf-8")
>>>
>>> st.download_button(
... label="Download report",
... data=make_report, # pass the function, don't call it
... file_name="report.csv",
... mime="text/csv",
... )
"""
ctx = get_script_run_ctx()
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
if type not in ["primary", "secondary", "tertiary"]:
raise StreamlitAPIException(
'The type argument to st.download_button must be "primary", "secondary", or "tertiary". \n'
f'The argument passed was "{type}".'
)
return self._download_button(
label=label,
data=data,
file_name=file_name,
mime=mime,
key=key,
help=help,
on_click=on_click,
args=args,
kwargs=kwargs,
type=type,
icon=icon,
disabled=disabled,
ctx=ctx,
width=width,
shortcut=shortcut,
)
@gather_metrics("link_button")
def link_button(
self,
label: str,
url: str,
*,
help: str | None = None,
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> DeltaGenerator:
r"""Display a link button element.
When clicked, a new tab will be opened to the specified URL. This will
create a new session for the user if directed within the app.
Parameters
----------
label : str
A short label explaining to the user what this button is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
url : str
The url to be opened on user click
help : str or None
A tooltip that gets displayed when the button is hovered over. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
type : "primary", "secondary", or "tertiary"
An optional string that specifies the button type. This can be one
of the following:
- ``"primary"``: The button's background is the app's primary color
for additional emphasis.
- ``"secondary"`` (default): The button's background coordinates
with the app's background color for normal emphasis.
- ``"tertiary"``: The button is plain text without a border or
background for subtlety.
icon : str or None
An optional emoji or icon to display next to the button label. If ``icon``
is ``None`` (default), no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
disabled : bool
An optional boolean that disables the link button if set to
``True``. The default is ``False``.
use_container_width : bool
Whether to expand the button's width to fill its parent container.
If ``use_container_width`` is ``False`` (default), Streamlit sizes
the button to fit its contents. If ``use_container_width`` is
``True``, the width of the button matches its parent container.
In both cases, if the contents of the button are wider than the
parent container, the contents will line wrap.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the link button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
shortcut : str or None
An optional keyboard shortcut that triggers the link button.
Provide a single alphanumeric key (e.g. ``"K"``, ``"4"``), a
function key (e.g. ``"F11"``), or a supported special key (e.g.
``"Enter"``, ``"Esc"``), optionally combined with modifiers.
Examples: ``"Ctrl+K"``, ``"Cmd+Shift+O"``, ``"Mod+Enter"``.
.. note::
The keys ``"C"`` and ``"R"`` are reserved and cannot be used,
even with modifiers. ``"Ctrl"``, ``"Cmd"``, and ``"Mod"`` are
platform-dependent: they map to ``"Command"`` (⌘) on macOS and
``"Control"`` on Windows/Linux. Punctuation keys (e.g. ``"."``,
``","``) are not currently supported.
Example
-------
>>> import streamlit as st
>>>
>>> st.link_button("Go to gallery", "https://streamlit.io/gallery")
.. output::
https://doc-link-button.streamlit.app/
height: 200px
"""
# Checks whether the entered button type is one of the allowed options - either "primary" or "secondary"
if type not in ["primary", "secondary", "tertiary"]:
raise StreamlitAPIException(
'The type argument to st.link_button must be "primary", "secondary", or "tertiary". '
f'\nThe argument passed was "{type}".'
)
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
return self._link_button(
label=label,
url=url,
help=help,
disabled=disabled,
type=type,
icon=icon,
width=width,
shortcut=shortcut,
)
@gather_metrics("page_link")
def page_link(
self,
page: str | Path | StreamlitPage,
*,
label: str | None = None,
icon: str | None = None,
help: str | None = None,
disabled: bool = False,
use_container_width: bool | None = None,
width: Width = "content",
query_params: QueryParamsInput | None = None,
) -> DeltaGenerator:
r"""Display a link to another page in a multipage app or to an external page.
If another page in a multipage app is specified, clicking ``st.page_link``
stops the current page execution and runs the specified page as if the
user clicked on it in the sidebar navigation.
If an external page is specified, clicking ``st.page_link`` opens a new
tab to the specified page. The current script run will continue if not
complete.
Parameters
----------
page : str, Path, or StreamlitPage
The file path (relative to the main script) or a ``StreamlitPage``
indicating the page to switch to. Alternatively, this can be the
URL to an external page (must start with "http://" or "https://").
label : str
The label for the page link. Labels are required for external pages.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
icon : str or None
An optional emoji or icon to display next to the button label. If
``icon`` is ``None`` (default), the icon is inferred from the
``StreamlitPage`` object or no icon is displayed. If ``icon`` is a
string, the following options are valid:
- A single-character emoji. For example, you can set ``icon="🚨"``
or ``icon="🔥"``. Emoji short codes are not supported.
- An icon from the Material Symbols library (rounded style) in the
format ``":material/icon_name:"`` where "icon_name" is the name
of the icon in snake case.
For example, ``icon=":material/thumb_up:"`` will display the
Thumb Up icon. Find additional icons in the `Material Symbols \
<https://fonts.google.com/icons?icon.set=Material+Symbols&icon.style=Rounded>`_
font library.
- ``"spinner"``: Displays a spinner as an icon.
help : str or None
A tooltip that gets displayed when the link is hovered over. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
disabled : bool
An optional boolean that disables the page link if set to ``True``.
The default is ``False``.
use_container_width : bool
Whether to expand the link's width to fill its parent container.
The default is ``True`` for page links in the sidebar and ``False``
for those in the main app.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the page-link button. This can be one of the following:
- ``"content"`` (default): The width of the button matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the button matches the width of the
parent container.
- An integer specifying the width in pixels: The button has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the button matches the width
of the parent container.
query_params : dict, list of tuples, or None
Query parameters to apply when navigating to the target page. This
can be a dictionary or an iterable of key-value tuples. Values can
be strings or iterables of strings (for repeated keys). When
omitted, all non-embed query parameters are cleared during navigation.
Example
-------
Consider the following example given this file structure:
>>> your-repository/
>>> ├── pages/
>>> │ ├── page_1.py
>>> │ └── page_2.py
>>> └── your_app.py
>>> import streamlit as st
>>>
>>> st.page_link("your_app.py", label="Home", icon="🏠")
>>> st.page_link("pages/page_1.py", label="Page 1", icon="1️⃣", query_params={"team": "streamlit"})
>>> st.page_link("pages/page_2.py", label="Page 2", icon="2️⃣", disabled=True)
>>> st.page_link("http://www.google.com", label="Google", icon="🌎")
The default navigation is shown here for comparison, but you can hide
the default navigation using the |client.showSidebarNavigation|_
configuration option. This allows you to create custom, dynamic
navigation menus for your apps!
.. |client.showSidebarNavigation| replace:: ``client.showSidebarNavigation``
.. _client.showSidebarNavigation: https://docs.streamlit.io/develop/api-reference/configuration/config.toml#client
.. output ::
https://doc-page-link.streamlit.app/
height: 350px
"""
if use_container_width is not None:
width = "stretch" if use_container_width else "content"
if in_sidebar(self.dg):
# Sidebar page links should always be stretch width.
width = "stretch"
return self._page_link(
page=page,
label=label,
icon=icon,
help=help,
disabled=disabled,
width=width,
query_params=query_params,
)
def _download_button(
self,
label: str,
data: DownloadButtonDataType,
file_name: str | None = None,
mime: str | None = None,
key: Key | None = None,
help: str | None = None,
on_click: WidgetCallback | Literal["rerun", "ignore"] | None = "rerun",
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
ctx: ScriptRunContext | None = None,
width: Width = "content",
shortcut: str | None = None,
) -> bool:
key = to_key(key)
on_click_callback: WidgetCallback | None = (
None
if on_click is None or on_click in {"ignore", "rerun"}
else cast("WidgetCallback", on_click)
)
normalized_shortcut: str | None = None
if shortcut is not None:
normalized_shortcut = normalize_shortcut(shortcut)
check_widget_policies(
self.dg,
key,
on_change=on_click_callback,
default_value=None,
writes_allowed=False,
)
element_id = compute_and_register_element_id(
"download_button",
user_key=key,
key_as_main_identity=True,
dg=self.dg,
label=label,
icon=icon,
file_name=file_name,
mime=mime,
help=help,
type=type,
width=width,
shortcut=normalized_shortcut,
)
if is_in_form(self.dg):
raise StreamlitAPIException(
f"`st.download_button()` can't be used in an `st.form()`.{FORM_DOCS_INFO}"
)
download_button_proto = DownloadButtonProto()
download_button_proto.id = element_id
download_button_proto.label = label
download_button_proto.default = False
download_button_proto.type = type
marshall_file(
self.dg._get_delta_path_str(), data, download_button_proto, mime, file_name
)
download_button_proto.disabled = disabled
if help is not None:
download_button_proto.help = dedent(help)
if icon is not None:
download_button_proto.icon = validate_icon_or_emoji(icon)
if on_click == "ignore":
download_button_proto.ignore_rerun = True
else:
download_button_proto.ignore_rerun = False
if normalized_shortcut is not None:
download_button_proto.shortcut = normalized_shortcut
serde = ButtonSerde()
button_state = register_widget(
download_button_proto.id,
on_change_handler=on_click_callback,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="trigger_value",
)
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
self.dg._enqueue(
"download_button", download_button_proto, layout_config=layout_config
)
return button_state.value
def _link_button(
self,
label: str,
url: str,
help: str | None,
*, # keyword-only arguments:
type: Literal["primary", "secondary", "tertiary"] = "secondary",
icon: str | None = None,
disabled: bool = False,
width: Width = "content",
shortcut: str | None = None,
) -> DeltaGenerator:
link_button_proto = LinkButtonProto()
normalized_shortcut: str | None = None
if shortcut is not None:
normalized_shortcut = normalize_shortcut(shortcut)
if normalized_shortcut is not None:
# We only register the element ID if a shortcut is provide.
# The ID is required to correctly register and handle the shortcut
# on the client side.
link_button_proto.id = compute_and_register_element_id(
"link_button",
user_key=None,
key_as_main_identity=False,
dg=self.dg,
label=label,
icon=icon,
url=url,
help=help,
type=type,
width=width,
shortcut=normalized_shortcut,
)
link_button_proto.label = label
link_button_proto.url = url
link_button_proto.type = type
link_button_proto.disabled = disabled
if help is not None:
link_button_proto.help = dedent(help)
if icon is not None:
link_button_proto.icon = validate_icon_or_emoji(icon)
if normalized_shortcut is not None:
link_button_proto.shortcut = normalized_shortcut
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
return self.dg._enqueue(
"link_button", link_button_proto, layout_config=layout_config
)
def _page_link(
self,
page: str | Path | StreamlitPage,
*, # keyword-only arguments:
label: str | None = None,
icon: str | None = None,
help: str | None = None,
disabled: bool = False,
width: Width = "content",
query_params: QueryParamsInput | None = None,
) -> DeltaGenerator:
page_link_proto = PageLinkProto()
if query_params:
page_link_proto.query_string = process_query_params(query_params)
validate_width(width, allow_content=True)
ctx = get_script_run_ctx()
if not ctx:
layout_config = LayoutConfig(width=width)
return self.dg._enqueue(
"page_link", page_link_proto, layout_config=layout_config
)
page_link_proto.disabled = disabled
if label is not None:
page_link_proto.label = label
if icon is not None:
page_link_proto.icon = validate_icon_or_emoji(icon)
if help is not None:
page_link_proto.help = dedent(help)
if isinstance(page, StreamlitPage):
page_link_proto.page_script_hash = page._script_hash
page_link_proto.page = page.url_path
if label is None:
page_link_proto.label = page.title
if icon is None:
page_link_proto.icon = page.icon
# Here the StreamlitPage's icon is already validated
# (using validate_icon_or_emoji) during its initialization
else:
# Convert Path to string if necessary
if isinstance(page, Path):
page = str(page)
# Handle external links:
if is_url(page):
if label is None or label == "":
raise StreamlitMissingPageLabelError()
page_link_proto.page = page
page_link_proto.external = True
layout_config = LayoutConfig(width=width)
return self.dg._enqueue(
"page_link", page_link_proto, layout_config=layout_config
)
ctx_main_script = ""
all_app_pages = {}
ctx_main_script = ctx.main_script_path
all_app_pages = ctx.pages_manager.get_pages()
main_script_directory = get_main_script_directory(ctx_main_script)
requested_page = os.path.realpath(
normalize_path_join(main_script_directory, page)
)
# Handle retrieving the page_script_hash & page
for page_data in all_app_pages.values():
full_path = page_data["script_path"]
page_name = page_data["page_name"]
url_pathname = page_data["url_pathname"]
if requested_page == full_path:
if label is None:
page_link_proto.label = page_name
page_link_proto.page_script_hash = page_data["page_script_hash"]
page_link_proto.page = url_pathname
break
if page_link_proto.page_script_hash == "":
raise StreamlitPageNotFoundError(
page=page,
main_script_directory=main_script_directory,
uses_pages_directory=bool(PagesManager.uses_pages_directory),
)
layout_config = LayoutConfig(width=width)
return self.dg._enqueue(
"page_link", page_link_proto, layout_config=layout_config
)
    def _button(
        self,
        label: str,
        key: str | None,
        help: str | None,
        is_form_submitter: bool,
        on_click: WidgetCallback | None = None,
        args: WidgetArgs | None = None,
        kwargs: WidgetKwargs | None = None,
        *,  # keyword-only arguments:
        type: Literal["primary", "secondary", "tertiary"] = "secondary",
        icon: str | None = None,
        disabled: bool = False,
        ctx: ScriptRunContext | None = None,
        width: Width = "content",
        shortcut: str | None = None,
    ) -> bool:
        """Shared implementation for ``st.button`` and ``st.form_submit_button``.

        Registers the widget, enforces form-placement rules (plain buttons
        can't live inside a form; form submit buttons must), and enqueues the
        Button proto. Returns True if the button was clicked on the last
        script run.
        """
        key = to_key(key)
        normalized_shortcut: str | None = None
        if shortcut is not None:
            normalized_shortcut = normalize_shortcut(shortcut)
        # Callback rules only apply to regular buttons, not form submitters.
        check_widget_policies(
            self.dg,
            key,
            on_click,
            default_value=None,
            writes_allowed=False,
            enable_check_callback_rules=not is_form_submitter,
        )
        # Only the form submitter button needs a form ID at the moment.
        form_id = current_form_id(self.dg) if is_form_submitter else ""
        element_id = compute_and_register_element_id(
            "form_submit_button" if is_form_submitter else "button",
            user_key=key,
            key_as_main_identity=True,
            dg=self.dg,
            label=label,
            icon=icon,
            help=help,
            is_form_submitter=is_form_submitter,
            type=type,
            width=width,
            shortcut=normalized_shortcut,
        )
        # It doesn't make sense to create a button inside a form (except
        # for the "Form Submitter" button that's automatically created in
        # every form). We throw an error to warn the user about this.
        # We omit this check for scripts running outside streamlit, because
        # they will have no script_run_ctx.
        if runtime.exists():
            if is_in_form(self.dg) and not is_form_submitter:
                raise StreamlitAPIException(
                    f"`st.button()` can't be used in an `st.form()`.{FORM_DOCS_INFO}"
                )
            if not is_in_form(self.dg) and is_form_submitter:
                raise StreamlitAPIException(
                    f"`st.form_submit_button()` must be used inside an `st.form()`.{FORM_DOCS_INFO}"
                )
        button_proto = ButtonProto()
        button_proto.id = element_id
        button_proto.label = label
        button_proto.default = False
        button_proto.is_form_submitter = is_form_submitter
        button_proto.form_id = form_id
        button_proto.type = type
        button_proto.disabled = disabled
        if help is not None:
            button_proto.help = dedent(help)
        if icon is not None:
            button_proto.icon = validate_icon_or_emoji(icon)
        if normalized_shortcut is not None:
            button_proto.shortcut = normalized_shortcut
        serde = ButtonSerde()
        button_state = register_widget(
            button_proto.id,
            on_change_handler=on_click,
            args=args,
            kwargs=kwargs,
            deserializer=serde.deserialize,
            serializer=serde.serialize,
            ctx=ctx,
            value_type="trigger_value",
        )
        # Record the value so AppTest can observe this element's state.
        if ctx:
            save_for_app_testing(ctx, element_id, button_state.value)
        validate_width(width, allow_content=True)
        layout_config = LayoutConfig(width=width)
        self.dg._enqueue("button", button_proto, layout_config=layout_config)
        return button_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
def marshall_file(
coordinates: str,
data: DownloadButtonDataType,
proto_download_button: DownloadButtonProto,
mimetype: str | None,
file_name: str | None = None,
) -> None:
# Check if data is a callable (for deferred downloads)
if callable(data):
if not runtime.exists():
# When running in "raw mode", we can't access the MediaFileManager.
proto_download_button.url = ""
return
# Register the callable for deferred execution
file_id = runtime.get_instance().media_file_mgr.add_deferred(
data,
mimetype,
coordinates,
file_name=file_name,
)
proto_download_button.deferred_file_id = file_id
proto_download_button.url = "" # No URL yet, will be generated on click
return
# Existing logic for non-callable data
data_as_bytes, inferred_mime_type = convert_data_to_bytes_and_infer_mime(
data,
unsupported_error=StreamlitAPIException(
f"Invalid binary data format: {type(data)}"
),
)
if mimetype is None:
mimetype = inferred_mime_type
if runtime.exists():
file_url = runtime.get_instance().media_file_mgr.add(
data_as_bytes,
mimetype,
coordinates,
file_name=file_name,
is_for_static_download=True,
)
else:
# When running in "raw mode", we can't access the MediaFileManager.
file_url = ""
proto_download_button.url = file_url
| ButtonMixin |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/backfills.py | {
"start": 1074,
"end": 1388
} | class ____(StrictBaseModel):
"""Object used for create backfill request."""
dag_id: str
from_date: datetime
to_date: datetime
run_backwards: bool = False
dag_run_conf: dict = {}
reprocess_behavior: ReprocessBehavior = ReprocessBehavior.NONE
max_active_runs: int = 10
| BackfillPostBody |
python | weaviate__weaviate-python-client | mock_tests/conftest.py | {
"start": 9347,
"end": 11719
} | class ____(weaviate_pb2_grpc.WeaviateServicer):
search_count = 0
tenants_count = 0
def Search(
self, request: search_get_pb2.SearchRequest, context: grpc.ServicerContext
) -> search_get_pb2.SearchReply:
if self.search_count == 0:
self.search_count += 1
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details("Internal server error")
return search_get_pb2.SearchReply()
if self.search_count == 1:
self.search_count += 1
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details("Service is unavailable")
return search_get_pb2.SearchReply()
return search_get_pb2.SearchReply(
results=[
search_get_pb2.SearchResult(
properties=search_get_pb2.PropertiesResult(
non_ref_props=properties_pb2.Properties(
fields={"name": properties_pb2.Value(text_value="test")}
)
)
)
]
)
def TenantsGet(
self, request: tenants_pb2.TenantsGetRequest, context: ServicerContext
) -> tenants_pb2.TenantsGetReply:
if self.tenants_count == 0:
self.tenants_count += 1
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details("Internal server error")
return tenants_pb2.TenantsGetReply()
if self.tenants_count == 1:
self.tenants_count += 1
context.set_code(grpc.StatusCode.UNAVAILABLE)
context.set_details("Service is unavailable")
return tenants_pb2.TenantsGetReply()
return tenants_pb2.TenantsGetReply(
tenants=[
tenants_pb2.Tenant(
name="tenant1", activity_status=tenants_pb2.TENANT_ACTIVITY_STATUS_ACTIVE
)
]
)
@pytest.fixture(scope="function")
def retries(
weaviate_client: weaviate.WeaviateClient, start_grpc_server: grpc.Server
) -> tuple[weaviate.collections.Collection, MockRetriesWeaviateService]:
service = MockRetriesWeaviateService()
weaviate_pb2_grpc.add_WeaviateServicer_to_server(service, start_grpc_server)
return weaviate_client.collections.use("RetriesCollection"), service
| MockRetriesWeaviateService |
python | tornadoweb__tornado | tornado/template.py | {
"start": 19267,
"end": 19917
} | class ____(_Node):
def __init__(self, template: Template, body: "_ChunkList") -> None:
self.template = template
self.body = body
self.line = 0
def generate(self, writer: "_CodeWriter") -> None:
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self) -> Iterable["_Node"]:
return (self.body,)
| _File |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 16419,
"end": 16762
} | class ____:
"""
Page 84, PDF 1.4 reference.
Page 115, PDF 2.0 reference.
"""
SINGLE_PAGE = "/SinglePage"
ONE_COLUMN = "/OneColumn"
TWO_COLUMN_LEFT = "/TwoColumnLeft"
TWO_COLUMN_RIGHT = "/TwoColumnRight"
TWO_PAGE_LEFT = "/TwoPageLeft" # (PDF 1.5)
TWO_PAGE_RIGHT = "/TwoPageRight" # (PDF 1.5)
| PageLayouts |
python | doocs__leetcode | solution/3100-3199/3106.Lexicographically Smallest String After Operations With Constraint/Solution.py | {
"start": 0,
"end": 427
} | class ____:
def getSmallestString(self, s: str, k: int) -> str:
cs = list(s)
for i, c1 in enumerate(s):
for c2 in ascii_lowercase:
if c2 >= c1:
break
d = min(ord(c1) - ord(c2), 26 - ord(c1) + ord(c2))
if d <= k:
cs[i] = c2
k -= d
break
return "".join(cs)
| Solution |
python | astropy__astropy | astropy/visualization/stretch.py | {
"start": 28701,
"end": 29206
} | class ____(CompositeTransform, BaseStretch):
"""
A combination of two stretches.
Parameters
----------
stretch_1 : :class:`astropy.visualization.BaseStretch`
The first stretch to apply.
stretch_2 : :class:`astropy.visualization.BaseStretch`
The second stretch to apply.
"""
def __call__(self, values, clip=True, out=None):
return self.transform_2(
self.transform_1(values, clip=clip, out=out), clip=clip, out=out
)
| CompositeStretch |
python | Pylons__pyramid | tests/test_testing.py | {
"start": 22427,
"end": 22484
} | class ____(Interface):
pass
@implementer(IDummy)
| IDummy |
python | networkx__networkx | networkx/algorithms/tests/test_euler.py | {
"start": 3712,
"end": 4288
} | class ____:
def test_is_semieulerian(self):
# Test graphs with Eulerian paths but no cycles return True.
assert nx.is_semieulerian(nx.path_graph(4))
G = nx.path_graph(6, create_using=nx.DiGraph)
assert nx.is_semieulerian(G)
# Test graphs with Eulerian cycles return False.
assert not nx.is_semieulerian(nx.complete_graph(5))
assert not nx.is_semieulerian(nx.complete_graph(7))
assert not nx.is_semieulerian(nx.hypercube_graph(4))
assert not nx.is_semieulerian(nx.hypercube_graph(6))
| TestIsSemiEulerian |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-build-sturdy-brick-wall.py | {
"start": 94,
"end": 1308
} | class ____(object):
def buildWall(self, height, width, bricks):
"""
:type height: int
:type width: int
:type bricks: List[int]
:rtype: int
"""
MOD = 10**9+7
def backtracking(height, width, bricks, total, mask, lookup, patterns):
if mask in lookup:
return
lookup.add(mask)
if total >= width:
if total == width:
patterns.append(mask^(1<<width))
return
for x in bricks:
backtracking(height, width, bricks, total+x, mask|(1<<(total+x)), lookup, patterns)
patterns, lookup = [], set()
backtracking(height, width, bricks, 0, 0, lookup, patterns)
adj = [[j for j, r2 in enumerate(patterns) if not (r1 & r2)] for r1 in patterns]
dp = [[1]*len(patterns), [0]*len(patterns)]
for i in xrange(height-1):
dp[(i+1)%2] = [sum(dp[i%2][k] for k in adj[j]) % MOD for j in xrange(len(patterns))]
return sum(dp[(height-1)%2]) % MOD
# Time: O(p^3 * logh), p is the number of patterns, p may be up to 512
# Space: O(p^3)
# bitmask, backtracking, matrix exponentiation
| Solution |
python | google__jax | tests/experimental_rnn_test.py | {
"start": 878,
"end": 9182
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(
batch_size=[1, 4],
seq_len=[1, 4],
input_size=[1, 2],
hidden_size=[1, 6],
num_layers=[1, 4],
bidirectional=[True, False],
)
@jtu.run_on_devices("cuda", "rocm")
@jax.default_matmul_precision("float32")
def test_lstm(self, batch_size: int, seq_len: int, input_size: int,
hidden_size: int, num_layers: int, bidirectional: bool):
# TODO(ruturaj4): Bidirectional doesn't quite work well with rocm.
if bidirectional and jtu.is_device_rocm():
self.skipTest("Bidirectional mode is not available for ROCm.")
num_directions = 2 if bidirectional else 1
seq_length_key, root_key = jax.random.split(jax.random.PRNGKey(0))
seq_lengths = jax.random.randint(
seq_length_key, (batch_size,), 1, seq_len, dtype=jnp.int32)
k1, k2, k3, k4 = jax.random.split(root_key, 4)
x = jax.random.normal(
k1, (batch_size, seq_len, input_size), dtype=jnp.float32)
h_0 = jax.random.normal(
k2, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
c_0 = jax.random.normal(
k3, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
weights = rnn.init_lstm_weight(k4, input_size, hidden_size, num_layers,
bidirectional)
def f(weights, x, h_0, c_0):
if jtu.is_device_rocm():
weights = rnn.swap_lstm_gates(weights, input_size, hidden_size, num_layers, bidirectional)
y, h, c = rnn.lstm(
x,
h_0,
c_0,
weights,
seq_lengths=seq_lengths,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=False,
bidirectional=bidirectional)
seq_length_mask = jnp.tile(jnp.arange(seq_len, dtype=jnp.int32)[None],
[batch_size, 1]) < seq_lengths[:, None]
loss = jnp.sum(jnp.where(seq_length_mask[..., None], y, 0.))
return loss, (y, h, c)
jtu.check_grads(f, (weights, x, h_0, c_0), modes=["rev"], order=1, atol=5E-3, rtol=5E-3)
(loss, (y, h_n, c_n)), weights_grad = jax.value_and_grad(f, has_aux=True)(
weights, x, h_0, c_0)
def g(weights, x, h_0, c_0):
W_ih, W_hh, b_ih, b_hh = rnn.unpack_lstm_weights(weights, input_size,
hidden_size, num_layers,
bidirectional)
y_ref, h_n_ref, c_n_ref = rnn.lstm_ref(
x,
h_0,
c_0,
W_ih,
W_hh,
b_ih,
b_hh,
seq_lengths=seq_lengths,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=False,
bidirectional=bidirectional)
seq_length_mask = jnp.tile(jnp.arange(seq_len, dtype=jnp.int32)[None],
[batch_size, 1]) < seq_lengths[:, None]
loss = jnp.sum(jnp.where(seq_length_mask[..., None], y_ref, 0.))
return loss, (y_ref, h_n_ref, c_n_ref)
(loss_ref, (y_ref, h_n_ref, c_n_ref)), weights_grad_ref = (
jax.value_and_grad(g, has_aux=True)(weights, x, h_0, c_0))
self.assertAllClose(weights_grad_ref, weights_grad, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(loss_ref, loss, rtol=1e-05, atol=1e-5)
np.testing.assert_allclose(y_ref, y, rtol=1e-05, atol=1e-5)
np.testing.assert_allclose(h_n_ref, h_n, rtol=1e-05, atol=1e-5)
np.testing.assert_allclose(c_n_ref, c_n, rtol=1e-05, atol=1e-5)
@jtu.sample_product(
batch_size=[1, 4],
seq_len=[1, 4],
input_size=[1, 2],
hidden_size=[1, 6],
num_layers=[1, 4],
bidirectional=[True, False],
)
def test_lstm_ref(self, batch_size: int, seq_len: int, input_size: int,
hidden_size: int, num_layers: int, bidirectional: bool):
num_directions = 2 if bidirectional else 1
seq_lengths = jax.random.randint(
jax.random.PRNGKey(0), (batch_size,), 0, seq_len, dtype=jnp.int32)
root_key = jax.random.PRNGKey(1)
k1, k2, k3, k4 = jax.random.split(root_key, 4)
x = jax.random.normal(
k1, (batch_size, seq_len, input_size), dtype=jnp.float32)
h_0 = jax.random.normal(
k2, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
c_0 = jax.random.normal(
k3, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
weights = rnn.init_lstm_weight(k4, input_size, hidden_size, num_layers,
bidirectional)
@partial(jax.value_and_grad, has_aux=True)
def f(weights, x, h_0, c_0):
W_ih, W_hh, b_ih, b_hh = rnn.unpack_lstm_weights(weights, input_size,
hidden_size, num_layers,
bidirectional)
y_ref, h_n_ref, c_n_ref = rnn.lstm_ref(
x,
h_0,
c_0,
W_ih,
W_hh,
b_ih,
b_hh,
seq_lengths=seq_lengths,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=False,
bidirectional=bidirectional)
loss = jnp.sum(y_ref)
return loss, (y_ref, h_n_ref, c_n_ref)
(loss_ref, (y_ref, h_n_ref, c_n_ref)), grad_ref = f(weights, x, h_0, c_0)
self.assertFalse(np.isnan(loss_ref))
self.assertFalse(np.isnan(grad_ref).any())
self.assertEqual(y_ref.shape, (batch_size, seq_len, num_directions * hidden_size))
for i in range(batch_size):
y_padded = y_ref[i, seq_lengths[i]:]
np.testing.assert_allclose(y_padded, jnp.zeros_like(y_padded))
@jtu.run_on_devices("cuda")
def test_struct_encoding_determinism(self):
def f(k1, k2, k3, k4):
batch_size = 1
seq_len = 1
input_size = 1
hidden_size = 1
bidirectional = False
num_directions = 2 if bidirectional else 1
num_layers = 1
x = jax.random.normal(k1, (batch_size, seq_len, input_size), dtype=jnp.float32)
h_0 = jax.random.normal(
k2, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
c_0 = jax.random.normal(
k3, (num_directions * num_layers, batch_size, hidden_size),
dtype=jnp.float32)
seq_lengths = jnp.ones((batch_size,), dtype=jnp.int32) * seq_len
weights = rnn.init_lstm_weight(k4, input_size, hidden_size, num_layers,
bidirectional)
return rnn.lstm(
x,
h_0,
c_0,
weights,
seq_lengths=seq_lengths,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=False,
bidirectional=bidirectional)
k = jax.random.split(jax.random.PRNGKey(1), 4)
stablehlo = jax.jit(f).lower(*k).as_text("stablehlo")
self.assertIn('"\\01\\00\\00\\00\\01\\00\\00\\00\\01\\00\\00\\00\\01\\00\\00\\00\\01\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\00\\01\\00\\00\\00@\\03\\80\\00\\00\\00\\00\\00@\\01\\00\\00\\00\\00\\00\\00"',
stablehlo)
@jtu.run_on_devices("cuda")
def test_no_workspace_overflow(self):
# Problem sizes known to cause overflows on older versions.
batch_size, max_seq_length, input_size = 256, 500, 512
num_layers, hidden_size = 1, 256
num_params = rnn.get_num_params_in_lstm(
input_size, hidden_size, num_layers, True)
x = jax.ShapeDtypeStruct(
(batch_size, max_seq_length, input_size), jnp.float32)
h_0 = jax.ShapeDtypeStruct(
(2 * num_layers, batch_size, hidden_size), jnp.float32)
c_0 = jax.ShapeDtypeStruct(
(2 * num_layers, batch_size, hidden_size), jnp.float32)
weights = jax.ShapeDtypeStruct((num_params,), jnp.float32)
seq_lengths = jax.ShapeDtypeStruct((batch_size,), jnp.int32)
fun = jax.jit(partial(
rnn.lstm, input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=0.0, bidirectional=True))
fun.lower(x, h_0, c_0, weights, seq_lengths) # Doesn't crash.
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| RnnTest |
python | keon__algorithms | tests/test_maths.py | {
"start": 15057,
"end": 16289
} | class ____(unittest.TestCase):
def test_k_three(self):
# Example which should give the answer 143
# which is the smallest possible x that
# solves the system of equations
num = [3, 7, 10]
rem = [2, 3, 3]
self.assertEqual(chinese_remainder_theorem.
solve_chinese_remainder(num, rem), 143)
def test_k_five(self):
# Example which should give the answer 3383
# which is the smallest possible x that
# solves the system of equations
num = [3, 5, 7, 11, 26]
rem = [2, 3, 2, 6, 3]
self.assertEqual(chinese_remainder_theorem.
solve_chinese_remainder(num, rem), 3383)
def test_exception_non_coprime(self):
# There should be an exception when all
# numbers in num are not pairwise coprime
num = [3, 7, 10, 14]
rem = [2, 3, 3, 1]
with self.assertRaises(Exception):
chinese_remainder_theorem.solve_chinese_remainder(num, rem)
def test_empty_lists(self):
num = []
rem = []
with self.assertRaises(Exception):
chinese_remainder_theorem.solve_chinese_remainder(num, rem)
| TestChineseRemainderSolver |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 23127,
"end": 27343
} | class ____(nn.Module):
def __init__(
self,
config,
n_bins,
n_attractors=16,
min_depth=1e-3,
max_depth=10,
memory_efficient=False,
):
"""
Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
"""
super().__init__()
self.alpha = config.attractor_alpha
self.gemma = config.attractor_gamma
self.kind = config.attractor_kind
self.n_attractors = n_attractors
self.n_bins = n_bins
self.min_depth = min_depth
self.max_depth = max_depth
self.memory_efficient = memory_efficient
# MLP to predict attractor points
in_features = mlp_dim = config.bin_embedding_dim
self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(mlp_dim, n_attractors * 2, 1, 1, 0) # x2 for linear norm
self.act2 = nn.ReLU(inplace=True)
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
"""
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
and the attractor points (the latter are predicted by the MLP).
Args:
x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Feature block.
prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`):
Previous bin centers normed.
prev_bin_embedding (`torch.Tensor`, *optional*):
Optional previous bin embeddings.
interpolate (`bool`, *optional*, defaults to `True`):
Whether to interpolate the previous bin embeddings to the size of the input features.
Returns:
`tuple[`torch.Tensor`, `torch.Tensor`]:
New bin centers normed and scaled.
"""
if prev_bin_embedding is not None:
if interpolate:
prev_bin_embedding = nn.functional.interpolate(
prev_bin_embedding, x.shape[-2:], mode="bilinear", align_corners=True
)
x = x + prev_bin_embedding
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
attractors = self.act2(x)
attractors = attractors + 1e-3
batch_size, _, height, width = attractors.shape
attractors = attractors.view(batch_size, self.n_attractors, 2, height, width)
# batch_size, num_attractors, 2, height, width
# note: original repo had a bug here: https://github.com/isl-org/ZoeDepth/blame/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/layers/attractor.py#L105C9-L106C50
# we include the bug to maintain compatibility with the weights
attractors_normed = attractors[:, :, 0, ...] # batch_size, batch_size*num_attractors, height, width
bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode="bilinear", align_corners=True)
# note: only attractor_type = "exp" is supported here, since no checkpoints were released with other attractor types
if not self.memory_efficient:
func = {"mean": torch.mean, "sum": torch.sum}[self.kind]
# shape (batch_size, num_bins, height, width)
delta_c = func(inv_attractor(attractors_normed.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)
else:
delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)
for i in range(self.n_attractors):
# shape (batch_size, num_bins, height, width)
delta_c += inv_attractor(attractors_normed[:, i, ...].unsqueeze(1) - bin_centers)
if self.kind == "mean":
delta_c = delta_c / self.n_attractors
bin_new_centers = bin_centers + delta_c
bin_centers = (self.max_depth - self.min_depth) * bin_new_centers + self.min_depth
bin_centers, _ = torch.sort(bin_centers, dim=1)
bin_centers = torch.clip(bin_centers, self.min_depth, self.max_depth)
return bin_new_centers, bin_centers
| ZoeDepthAttractorLayer |
python | celery__celery | t/unit/events/test_state.py | {
"start": 10427,
"end": 22261
} | class ____:
def test_repr(self):
assert repr(State())
def test_pickleable(self):
state = State()
r = ev_logical_clock_ordering(state)
r.play()
assert pickle.loads(pickle.dumps(state))
def test_task_logical_clock_ordering(self):
state = State()
r = ev_logical_clock_ordering(state)
tA, tB, tC = r.uids
r.play()
now = list(state.tasks_by_time())
assert now[0][0] == tA
assert now[1][0] == tC
assert now[2][0] == tB
for _ in range(1000):
shuffle(r.uids)
tA, tB, tC = r.uids
r.rewind_with_offset(r.current_clock + 1, r.uids)
r.play()
now = list(state.tasks_by_time())
assert now[0][0] == tA
assert now[1][0] == tC
assert now[2][0] == tB
@pytest.mark.skip('TODO: not working')
def test_task_descending_clock_ordering(self):
state = State()
r = ev_logical_clock_ordering(state)
tA, tB, tC = r.uids
r.play()
now = list(state.tasks_by_time(reverse=False))
assert now[0][0] == tA
assert now[1][0] == tB
assert now[2][0] == tC
for _ in range(1000):
shuffle(r.uids)
tA, tB, tC = r.uids
r.rewind_with_offset(r.current_clock + 1, r.uids)
r.play()
now = list(state.tasks_by_time(reverse=False))
assert now[0][0] == tB
assert now[1][0] == tC
assert now[2][0] == tA
def test_get_or_create_task(self):
state = State()
task, created = state.get_or_create_task('id1')
assert task.uuid == 'id1'
assert created
task2, created2 = state.get_or_create_task('id1')
assert task2 is task
assert not created2
def test_get_or_create_worker(self):
state = State()
worker, created = state.get_or_create_worker('george@vandelay.com')
assert worker.hostname == 'george@vandelay.com'
assert created
worker2, created2 = state.get_or_create_worker('george@vandelay.com')
assert worker2 is worker
assert not created2
def test_get_or_create_worker__with_defaults(self):
state = State()
worker, created = state.get_or_create_worker(
'george@vandelay.com', pid=30,
)
assert worker.hostname == 'george@vandelay.com'
assert worker.pid == 30
assert created
worker2, created2 = state.get_or_create_worker(
'george@vandelay.com', pid=40,
)
assert worker2 is worker
assert worker2.pid == 40
assert not created2
def test_worker_online_offline(self):
r = ev_worker_online_offline(State())
next(r)
assert list(r.state.alive_workers())
assert r.state.workers['utest1'].alive
r.play()
assert not list(r.state.alive_workers())
assert not r.state.workers['utest1'].alive
def test_itertasks(self):
s = State()
s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
assert len(list(s.itertasks(limit=2))) == 2
def test_worker_heartbeat_expire(self):
r = ev_worker_heartbeats(State())
next(r)
assert not list(r.state.alive_workers())
assert not r.state.workers['utest1'].alive
r.play()
assert list(r.state.alive_workers())
assert r.state.workers['utest1'].alive
def test_task_states(self):
r = ev_task_states(State())
# RECEIVED
next(r)
assert r.tid in r.state.tasks
task = r.state.tasks[r.tid]
assert task.state == states.RECEIVED
assert task.received
assert task.timestamp == task.received
assert task.worker.hostname == 'utest1'
# STARTED
next(r)
assert r.state.workers['utest1'].alive
assert task.state == states.STARTED
assert task.started
assert task.timestamp == task.started
assert task.worker.hostname == 'utest1'
# REVOKED
next(r)
assert task.state == states.REVOKED
assert task.revoked
assert task.timestamp == task.revoked
assert task.worker.hostname == 'utest1'
# RETRY
next(r)
assert task.state == states.RETRY
assert task.retried
assert task.timestamp == task.retried
assert task.worker.hostname, 'utest1'
assert task.exception == "KeyError('bar')"
assert task.traceback == 'line 2 at main'
# FAILURE
next(r)
assert task.state == states.FAILURE
assert task.failed
assert task.timestamp == task.failed
assert task.worker.hostname == 'utest1'
assert task.exception == "KeyError('foo')"
assert task.traceback == 'line 1 at main'
# SUCCESS
next(r)
assert task.state == states.SUCCESS
assert task.succeeded
assert task.timestamp == task.succeeded
assert task.worker.hostname == 'utest1'
assert task.result == '4'
assert task.runtime == 0.1234
# children, parent, root
r.play()
assert r.tid2 in r.state.tasks
task2 = r.state.tasks[r.tid2]
assert task2.parent is task
assert task2.root is task
assert task2 in task.children
def test_task_children_set_if_received_in_wrong_order(self):
r = ev_task_states(State())
r.events.insert(0, r.events.pop())
r.play()
assert r.state.tasks[r.tid2] in r.state.tasks[r.tid].children
assert r.state.tasks[r.tid2].root is r.state.tasks[r.tid]
assert r.state.tasks[r.tid2].parent is r.state.tasks[r.tid]
def assertStateEmpty(self, state):
assert not state.tasks
assert not state.workers
assert not state.event_count
assert not state.task_count
def assertState(self, state):
assert state.tasks
assert state.workers
assert state.event_count
assert state.task_count
def test_freeze_while(self):
s = State()
r = ev_snapshot(s)
r.play()
def work():
pass
s.freeze_while(work, clear_after=True)
assert not s.event_count
s2 = State()
r = ev_snapshot(s2)
r.play()
s2.freeze_while(work, clear_after=False)
assert s2.event_count
def test_clear_tasks(self):
s = State()
r = ev_snapshot(s)
r.play()
assert s.tasks
s.clear_tasks(ready=False)
assert not s.tasks
def test_clear(self):
r = ev_snapshot(State())
r.play()
assert r.state.event_count
assert r.state.workers
assert r.state.tasks
assert r.state.task_count
r.state.clear()
assert not r.state.event_count
assert not r.state.workers
assert r.state.tasks
assert not r.state.task_count
r.state.clear(False)
assert not r.state.tasks
def test_task_types(self):
r = ev_snapshot(State())
r.play()
assert sorted(r.state.task_types()) == ['task1', 'task2']
def test_tasks_by_time(self):
r = ev_snapshot(State())
r.play()
assert len(list(r.state.tasks_by_time())) == 20
assert len(list(r.state.tasks_by_time(reverse=False))) == 20
def test_tasks_by_type(self):
r = ev_snapshot(State())
r.play()
assert len(list(r.state.tasks_by_type('task1'))) == 10
assert len(list(r.state.tasks_by_type('task2'))) == 10
assert len(r.state.tasks_by_type['task1']) == 10
assert len(r.state.tasks_by_type['task2']) == 10
def test_alive_workers(self):
r = ev_snapshot(State())
r.play()
assert len(list(r.state.alive_workers())) == 3
def test_tasks_by_worker(self):
r = ev_snapshot(State())
r.play()
assert len(list(r.state.tasks_by_worker('utest1'))) == 10
assert len(list(r.state.tasks_by_worker('utest2'))) == 10
assert len(r.state.tasks_by_worker['utest1']) == 10
assert len(r.state.tasks_by_worker['utest2']) == 10
def test_survives_unknown_worker_event(self):
s = State()
s.event({
'type': 'worker-unknown-event-xxx',
'foo': 'bar',
})
s.event({
'type': 'worker-unknown-event-xxx',
'hostname': 'xxx',
'foo': 'bar',
})
def test_survives_unknown_worker_leaving(self):
s = State(on_node_leave=Mock(name='on_node_leave'))
(worker, created), subject = s.event({
'type': 'worker-offline',
'hostname': 'unknown@vandelay.com',
'timestamp': time(),
'local_received': time(),
'clock': 301030134894833,
})
assert worker == Worker('unknown@vandelay.com')
assert not created
assert subject == 'offline'
assert 'unknown@vandelay.com' not in s.workers
s.on_node_leave.assert_called_with(worker)
def test_on_node_join_callback(self):
s = State(on_node_join=Mock(name='on_node_join'))
(worker, created), subject = s.event({
'type': 'worker-online',
'hostname': 'george@vandelay.com',
'timestamp': time(),
'local_received': time(),
'clock': 34314,
})
assert worker
assert created
assert subject == 'online'
assert 'george@vandelay.com' in s.workers
s.on_node_join.assert_called_with(worker)
def test_survives_unknown_task_event(self):
s = State()
s.event({
'type': 'task-unknown-event-xxx',
'foo': 'bar',
'uuid': 'x',
'hostname': 'y',
'timestamp': time(),
'local_received': time(),
'clock': 0,
})
def test_limits_maxtasks(self):
s = State(max_tasks_in_memory=1)
s.heap_multiplier = 2
s.event({
'type': 'task-unknown-event-xxx',
'foo': 'bar',
'uuid': 'x',
'hostname': 'y',
'clock': 3,
'timestamp': time(),
'local_received': time(),
})
s.event({
'type': 'task-unknown-event-xxx',
'foo': 'bar',
'uuid': 'y',
'hostname': 'y',
'clock': 4,
'timestamp': time(),
'local_received': time(),
})
s.event({
'type': 'task-unknown-event-xxx',
'foo': 'bar',
'uuid': 'z',
'hostname': 'y',
'clock': 5,
'timestamp': time(),
'local_received': time(),
})
assert len(s._taskheap) == 2
assert s._taskheap[0].clock == 4
assert s._taskheap[1].clock == 5
s._taskheap.append(s._taskheap[0])
assert list(s.tasks_by_time())
def test_callback(self):
scratch = {}
def callback(state, event):
scratch['recv'] = True
s = State(callback=callback)
s.event({'type': 'worker-online'})
assert scratch.get('recv')
def test_deepcopy(self):
import copy
s = State()
s.event({
'type': 'task-success',
'root_id': 'x',
'uuid': 'x',
'hostname': 'y',
'clock': 3,
'timestamp': time(),
'local_received': time(),
})
s.event({
'type': 'task-success',
'root_id': 'y',
'uuid': 'y',
'hostname': 'y',
'clock': 4,
'timestamp': time(),
'local_received': time(),
})
copy.deepcopy(s)
| test_State |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 114739,
"end": 115530
} | class ____(BaseModel, extra="forbid"):
shard_key: Optional["ShardKeySelector"] = Field(
default=None,
description="Specify in which shards to look for the points, if not specified - look in all shards",
)
filter: Optional["Filter"] = Field(default=None, description="Look only for points which satisfies this conditions")
sample: Optional[int] = Field(
default=None, description="How many points to select and search within. Default is 10."
)
limit: Optional[int] = Field(default=None, description="How many neighbours per sample to find. Default is 3.")
using: Optional[str] = Field(
default=None,
description="Define which vector name to use for querying. If missing, the default vector is used.",
)
| SearchMatrixRequest |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 48062,
"end": 48314
} | class ____(VOTableSpecError):
"""
The table had only *x* fields defined, but the data itself has more
columns than that.
"""
message_template = "Data has more columns than are defined in the header ({})"
default_args = ("x",)
| E20 |
python | joke2k__faker | faker/providers/currency/el_GR/__init__.py | {
"start": 46,
"end": 5680
} | class ____(CurrencyProvider):
# Source https://el.wikipedia.org/wiki/Κατάλογος_νομισμάτων_των_χωρών_του_κόσμου
# Format: (code, name)
currencies = (
("AED", "Ντιρχάμ των Ηνωμένων Αραβικών Εμιράτων"),
("AFN", "Αφγάνι"),
("ALL", "Λεκ"),
("AMD", "Ντραμ"),
("AOA", "Κουάνζα"),
("ARS", "Πέσο Αργεντινής"),
("AUD", "Δολάριο Αυστραλίας"),
("AZN", "Μανάτ του Αζερμπαϊτζάν"),
("BAM", "Μετατρέψιμο μάρκο Βοσνίας και Ερζεγοβίνης"),
("BBD", "Δολάριο των Μπαρμπάντος"),
("BDT", "Τάκα"),
("BGN", "Λεβ"),
("BHD", "Δηνάριο του Μπαχρέιν"),
("BIF", "Φράγκο του Μπουρούντι"),
("BND", "Κυάτ Μιανμάρ"),
("BOB", "Μπολιβιάνο"),
("BRL", "Ρεάλ Βραζιλίας"),
("BSD", "Δολάριο Μπαχάμας"),
("BTN", "Νγκούλντρουμ"),
("BWP", "Πούλα"),
("BYΝ", "Ρούβλι Λευκορωσίας"),
("BZD", "Δολάριο Μπελίζ"),
("CAD", "Δολάριο Καναδά"),
("CDF", "Φράγκο του Κονγκό"),
("CHF", "Ελβετικό Φράγκο"),
("CLP", "Πέσο Χιλής"),
("CNY", "Γιουάν |"),
("COP", "Πέσο Κολομβίας"),
("CRC", "Κολόν"),
("CSD", "Δηνάριο Σερβίας"),
("CUC", "Μετατρέψιμο πέσο Κούβας"),
("CUP", "Πέσος Κούβας"),
("CVE", "Εσκούδο Πρασίνου Ακρωτηρίου"),
("CZK", "Κορόνα Τσεχίας (koruna)"),
("DJF", "Φράγκο του Τζιμπουτί"),
("DKK", "Κορόνα Δανίας"),
("DOP", "Πέσο Δομινικανής Δημοκρατίας"),
("DZD", "Δηνάριο της Αλγερίας"),
("EGP", "Λίρα Αιγύπτου"),
("ERN", "Νάκφα"),
("ETB", "Μπιρ"),
("EUR", "Ευρώ"),
("FJD", "Δολάριο Νησιών Φίτζι"),
("GBP", "Στερλίνα"),
("GEL", "Λάρι"),
("GHC", "Σέντι της Γκάνας"),
("GMD", "Νταλάζι (Dalasi)"),
("GNF", "Φράγκο Γουινέας"),
("GTQ", "Κετσάλ"),
("GYD", "Δολάριο Γουιάνας"),
("HNL", "Λεμπίρα"),
("HRK", "Κούνα"),
("HTG", "Γκουρντ"),
("HUF", "Φιορίνι Ουγγαρίας"),
("IDR", "Ρουπία Ινδονησίας"),
("ILS", "Νέο σέκελ"),
("INR", "Ρουπία Ινδίας[6]"),
("IQD", "Δηνάριο του Ιράκ"),
("IRR", "Ριάλ του Ιράν"),
("ISK", "Κορόνα Ισλανδίας (króna)"),
("JMD", "Δολάριο Τζαμάικας"),
("JOD", "Ιορδανικό δηνάριο"),
("JPY", "Γιέν"),
("KES", "Σελίνι Κένυας"),
("KGS", "Σομ της Κιργιζίας"),
("KHR", "Ριέλ Καμπότζης"),
("KMF", "Φράγκο Κομόρων"),
("KPW", "Γουόν Βόρειας Κορέας"),
("KRW", "Γουόν Νότιας Κορέας"),
("KWD", "Δηνάριο του Κουβέιτ"),
("KZT", "Τένγκε"),
("LAK", "Κιπ"),
("LBP", "Λίρα Λιβάνου"),
("LKR", "Ρουπία της Σρι Λάνκας (rupee)"),
("LRD", "Δολάριο Λιβερίας"),
("LSL", "Λότι"),
("LYD", "Δηνάριο Λιβύης"),
("MAD", "Ντιρχάμ Μαρόκου"),
("MDL", "Μολδαβικό Λέου"),
("MGA", "Αριάρι[10]"),
("MKD", "Δηνάριο Βόρειας Μακεδονίας"),
("MNT", "Τουγκρίκ"),
("MRU", "Ουγκίγια[10]"),
("MUR", "Ρουπία Μαυρίκιου"),
("MVR", "Ρουφίγια"),
("MWK", "Κουάτσα του Μαλάουι"),
("MXN", "Πέσο Μεξικού"),
("MYR", "Ρινγκίτ"),
("MZN", "Μετικάλ"),
("NAD", "Δολάριο Ναμίμπιας"),
("NGN", "Νάιρα"),
("NIO", "Χρυσό κόρντομπα της Νικαράγουας"),
("NOK", "Κορόνα Νορβηγίας (krone)"),
("NPR", "Ρουπία του Νεπάλ (rupee)"),
("NZD", "Δολάριο Νέας Ζηλανδίας"),
("OMR", "Ριάλ του Ομάν"),
("PAB", "Μπαλμπόα Παναμά"),
("PEK", "ΠΕΚΕΡΟΝ"),
("PEN", "Σολ Περού (sol)"),
("PGK", "Κίνα Παπούα-Νέας Γουινέας"),
("PHP", "Πέσο Φιλιππίνων"),
("PKR", "Ρουπία του Πακιστάν (rupee)"),
("PLN", "Ζλότι"),
("PYG", "Γκουαρανί"),
("QAR", "Ριγιάλ του Κατάρ"),
("RON", "Λέου Ρουμανίας"),
("RUB", "Ρούβλι Ρωσίας"),
("RWF", "Φράγκο της Ρουάντα"),
("SAR", "Ριάλ Σαουδικής Αραβίας (riyal)"),
("SBD", "Δολάριο των Νήσων του Σολομώντα"),
("SCR", "Ρουπία των Σεϋχελλών (Seychellois rupee)"),
("SDG", "Λίρα του Σουδάν"),
("SEK", "Κορόνα Σουηδίας (krona)"),
("SGD", "Δολάριο Σιγκαπούρης"),
("SLL", "Λεόνε της Σιέρα Λεόνε"),
("SOS", "Σελίνι Σομαλίας"),
("SRD", "Δολάριο του Σουρινάμ"),
("SSP", "Λίρα Νοτίου Σουδάν"),
("STN", "Ντόμπρα"),
("SYP", "Λίρα Συρίας"),
("SZL", "Λιλανγκένι"),
("THB", "Μπαχτ"),
("TJS", "Σομόνι"),
("TMM", "Μανάτ του Τουρκμενιστάν"),
("TND", "Δηνάριο Τυνησίας"),
("TOP", "Παάνγκα"),
("TRY", "Τουρκική Λίρα"),
("TTD", "Δολάριο Τρινιντάντ και Τομπάγκο"),
("TZS", "Σελίνι Τανζανίας (shilling)"),
("UAH", "Γρίβνα Ουκρανίας"),
("UGX", "Σελίνι Ουγκάντας"),
("USD", "Δολάριο ΗΠΑ"),
("UYU", "Πέσο Ουρουγουάης"),
("UZS", "Σομ του Ουζμπεκιστάν"),
("VES", "Μπολίβαρ Σομπεράνο"),
("VND", "Ντονγκ"),
("VUV", "Βάτου"),
("WST", "Τάλα Σαμόα"),
("XAF", "Φράγκο CFA Κεντρικής Αφρικής"),
("XCD", "Δολάριο Ανατολικής Καραϊβικής"),
("XOF", "Φράγκο CFA Δυτικής Αφρικής"),
("YER", "Ριάλ Υεμένης"),
("ZAR", "Ραντ Νότιας Αφρικής"),
("ZMK", "Κουάτσα της Ζάμπιας"),
("ZWD", "RTGS Dollar"),
)
price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
def pricetag(self) -> str:
return self.numerify(self.random_element(self.price_formats)) + "\N{NO-BREAK SPACE}\N{EURO SIGN}"
| Provider |
python | astropy__astropy | astropy/io/fits/hdu/compressed/_codecs.py | {
"start": 8694,
"end": 10423
} | class ____(Codec):
"""
The FITS PLIO1 compression and decompression algorithm.
The IRAF PLIO (pixel list) algorithm was developed to store integer-valued
image masks in a compressed form. Such masks often have large regions of
constant value hence are highly compressible. The compression algorithm
used is based on run-length encoding, with the ability to dynamically
follow level changes in the image, allowing a 16-bit encoding to be used
regardless of the image depth.
"""
codec_id = "FITS_PLIO1"
def __init__(self, *, tilesize: int):
self.tilesize = tilesize
def decode(self, buf):
"""
Decompress buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
cbytes = np.frombuffer(_as_native_endian_array(buf), dtype=np.uint8).tobytes()
dbytes = decompress_plio_1_c(cbytes, self.tilesize)
return np.frombuffer(dbytes, dtype="i4")
def encode(self, buf):
"""
Compress the data in the buffer using the PLIO_1 algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
# We convert the data to native endian because it is passed to the
# C compression code which will interpret it as being native endian.
dbytes = _as_native_endian_array(buf).astype("i4", copy=False).tobytes()
return compress_plio_1_c(dbytes, self.tilesize)
| PLIO1 |
python | paramiko__paramiko | tests/agent.py | {
"start": 320,
"end": 469
} | class ____(AgentKey):
def __init__(self, name, blob):
self.name = name
self.blob = blob
self.inner_key = None
| _BareAgentKey |
python | doocs__leetcode | solution/2200-2299/2202.Maximize the Topmost Element After K Moves/Solution.py | {
"start": 0,
"end": 354
} | class ____:
def maximumTop(self, nums: List[int], k: int) -> int:
if k == 0:
return nums[0]
n = len(nums)
if n == 1:
if k % 2:
return -1
return nums[0]
ans = max(nums[: k - 1], default=-1)
if k < n:
ans = max(ans, nums[k])
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor_shape_test.py | {
"start": 1251,
"end": 21585
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertShapeEq(self, x, y):
assert isinstance(x, RaggedTensorDynamicShape)
assert isinstance(y, RaggedTensorDynamicShape)
self.assertLen(x.partitioned_dim_sizes, len(y.partitioned_dim_sizes))
for x_dims, y_dims in zip(x.partitioned_dim_sizes, y.partitioned_dim_sizes):
self.assertAllEqual(x_dims, y_dims)
self.assertAllEqual(x.inner_dim_sizes, y.inner_dim_sizes)
@parameterized.parameters([
dict(value='x', expected_dim_sizes=[]),
dict(value=['a', 'b', 'c'], expected_dim_sizes=[3]),
dict(value=[['a', 'b', 'c'], ['d', 'e', 'f']], expected_dim_sizes=[2, 3]),
dict(
value=[[['a', 'b', 'c'], ['d', 'e', 'f']]],
expected_dim_sizes=[1, 2, 3]),
dict(
value=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d',
'e']]),
expected_dim_sizes=[2, [3, 2]]),
dict(
value=ragged_factory_ops.constant_value([[['a', 'b', 'c'], ['d',
'e']]]),
expected_dim_sizes=[1, [2], [3, 2]]),
dict(
value=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']]], ragged_rank=1),
expected_dim_sizes=[1, [2], 3]),
dict(
value=ragged_factory_ops.constant_value(
[[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1),
expected_dim_sizes=[2, [2, 1], 2, 1]),
dict(
value=ragged_factory_ops.constant_value([[10, 20], [30]]),
expected_dim_sizes=[2, [2, 1]]),
# Docstring examples:
dict(value=[[1, 2, 3], [4, 5, 6]], expected_dim_sizes=[2, 3]),
dict(
value=ragged_factory_ops.constant_value([[1, 2], [], [3, 4, 5]]),
expected_dim_sizes=[3, [2, 0, 3]]),
dict(
value=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5, 6]]],
ragged_rank=1),
expected_dim_sizes=[2, [2, 1], 2]),
dict(
value=ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]),
expected_dim_sizes=[2, [2, 1], [2, 1, 2]]),
])
def testFromTensor(self, value, expected_dim_sizes):
shape = RaggedTensorDynamicShape.from_tensor(value)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes)
self.assertShapeEq(shape, expected)
@parameterized.parameters([
dict(dim_sizes=[], rank=0, expected_dim_sizes=[]),
dict(dim_sizes=[], rank=3, expected_dim_sizes=[1, 1, 1]),
dict(dim_sizes=[3], rank=1, expected_dim_sizes=[3]),
dict(dim_sizes=[3], rank=3, expected_dim_sizes=[1, 1, 3]),
dict(dim_sizes=[2, 3], rank=3, expected_dim_sizes=[1, 2, 3]),
dict(dim_sizes=[3, [3, 2, 4]], rank=2, expected_dim_sizes=[3, [3, 2, 4]]),
dict(
dim_sizes=[3, [3, 2, 4]],
rank=4,
expected_dim_sizes=[1, 1, 3, [3, 2, 4]]),
dict(
dim_sizes=[3, [3, 2, 4], 2, 3],
rank=5,
expected_dim_sizes=[1, 3, [3, 2, 4], 2, 3]),
])
def testBroadcastToRank(self, dim_sizes, rank, expected_dim_sizes):
shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes)
broadcasted_shape = shape.broadcast_to_rank(rank)
self.assertShapeEq(broadcasted_shape, expected)
self.assertEqual(broadcasted_shape.rank, rank)
@parameterized.parameters([
#=========================================================================
# dimension[axis] is uniform inner; and row_lengths is a scalar
#=========================================================================
# shape: [BROADCAST(UNIFORM), UNIFORM, UNIFORM]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, 4, 5],
broadcast_dim_sizes=[3, 4, 5]),
# shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=5,
original_dim_sizes=[3, 4, 1],
broadcast_dim_sizes=[3, 4, 5]),
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=5,
original_dim_sizes=[3, [3, 2, 8], 1],
broadcast_dim_sizes=[3, [3, 2, 8], 5]),
# shape: [UNIFORM, RAGGED, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=5,
row_length=5,
original_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 1],
broadcast_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 5]),
#=========================================================================
# dimension[axis] is uniform inner; and row_lengths is a vector
#=========================================================================
# shape: [UNIFORM, BROADCAST(UNIFORM)]
dict(axis=1,
row_length=[2, 0, 1],
original_dim_sizes=[3, 1],
broadcast_dim_sizes=[3, [2, 0, 1]]),
# shape: [UNIFORM, BROADCAST(UNIFORM), UNIFORM]
dict(axis=1,
row_length=[2, 0, 1],
original_dim_sizes=[3, 1, 5],
broadcast_dim_sizes=[3, [2, 0, 1], 5]),
# shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=[2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0],
original_dim_sizes=[4, 3, 1],
broadcast_dim_sizes=[4, 3, [2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0]]),
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=[2, 5, 3],
original_dim_sizes=[2, [2, 1], 1],
broadcast_dim_sizes=[2, [2, 1], [2, 5, 3]]),
# shape: [UNIFORM, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM), UNIFORM]
dict(axis=4,
row_length=list(range(18)),
original_dim_sizes=[2, [2, 1], 3, 2, 1, 8],
broadcast_dim_sizes=[2, [2, 1], 3, 2, list(range(18)), 8]),
#=========================================================================
# dimension[axis] is uniform partitioned; and row_lengths is a scalar
#=========================================================================
# shape: [BROADCAST(UNIFORM), RAGGED]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, [5]],
broadcast_dim_sizes=[3, [5, 5, 5]]),
# shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED]
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 3, [3, 0, 2]],
broadcast_dim_sizes=[2, 3, [3, 0, 2, 3, 0, 2]]),
# shape: [BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM, UNIFORM]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, [3], [3, 5, 2], 9, 4, 5],
broadcast_dim_sizes=[3, [3, 3, 3], [3, 5, 2, 3, 5, 2, 3, 5, 2],
9, 4, 5]),
# shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED, UNIFORM]
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 2, [2, 1], [3, 5, 2], 2],
broadcast_dim_sizes=[2, 2, [2, 1, 2, 1], [3, 5, 2, 3, 5, 2], 2]),
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM]
dict(axis=1,
row_length=2,
original_dim_sizes=[3, 1, [4, 0, 2], 5],
broadcast_dim_sizes=[3, 2, [4, 0, 2, 4, 0, 2], 5]),
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED]
dict(axis=1,
row_length=1,
original_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)],
broadcast_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)]),
#=========================================================================
# dimension[axis] is uniform partitioned; and row_lengths is a vector
#=========================================================================
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM]
dict(axis=1,
row_length=[4, 1, 2],
original_dim_sizes=[
3, # axis=0
1, # axis=1 (broadcast)
[3, 1, 2], # axis=2
5], # axis=3
broadcast_dim_sizes=[
3, # axis=0
[4, 1, 2], # axis=1 (broadcast)
[3, 3, 3, 3, 1, 2, 2], # axis=2
5]), # axis=3
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, RAGGED]
dict(axis=1,
row_length=[2, 0, 3],
original_dim_sizes=[
3, # axis=0
1, # axis=1 (broadcast)
[3, 1, 2], # axis=2
[3, 1, 4, 1, 5, 9]], # axis=3
broadcast_dim_sizes=[
3, # axis=0
[2, 0, 3], # axis=1 (broadcast)
[3, 3, 2, 2, 2], # axis=2
[3, 1, 4, 3, 1, 4, 5, 9, 5, 9, 5, 9]]), # axis=3
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM]
dict(axis=2,
row_length=[4, 1, 2],
original_dim_sizes=[
3, # axis=0
[2, 0, 1], # axis=1
1, # axis=2 (broadcast)
[3, 2, 1], # axis=3
[1, 0, 1, 0, 2, 3], # axis=4
5], # axis=5
broadcast_dim_sizes=[
3, # axis=0
[2, 0, 1], # axis=2
[4, 1, 2], # axis=2 (broadcast)
[3, 3, 3, 3, 2, 1, 1], # axis=3
[1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, # axis=4
2, 3, 3],
5]), # axis=5
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 1, 2, (2, 1)],
broadcast_dim_sizes=[2, 1, 2, (2, 1, 2, 1)]),
dict(axis=1,
row_length=(2, 1),
original_dim_sizes=[2, 1, 2, (2, 1, 2, 1)],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
dict(axis=2,
row_length=2,
original_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
dict(axis=3,
row_length=(2, 1, 2, 1, 2, 1),
original_dim_sizes=[2, (2, 1), 2, 1],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
]) # pyformat: disable
def testBroadcastDimension(self, axis, row_length, original_dim_sizes,
broadcast_dim_sizes):
"""Tests for the broadcast_dimension method.
Verifies that:
* `original.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, 1) == broadcast`
Args:
axis: The axis to broadcast
row_length: The slice lengths to broadcast to.
original_dim_sizes: The dimension sizes before broadcasting.
original_dim_sizes[axis] should be equal to `1` or `row_length`.
broadcast_dim_sizes: THe dimension sizes after broadcasting.
"""
original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)
bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)
self.assertEqual(original_shape.rank, bcast_shape.rank)
# shape[axis].value == 1 and row_length > 1:
bcast1 = original_shape.broadcast_dimension(axis, row_length)
# shape[axis].value > 1 and row_length == shape[axis].value:
bcast2 = bcast_shape.broadcast_dimension(axis, row_length)
# shape[axis].value > 1 and row_length == 1:
bcast3 = bcast_shape.broadcast_dimension(axis, 1)
self.assertShapeEq(bcast1, bcast_shape)
self.assertShapeEq(bcast2, bcast_shape)
self.assertShapeEq(bcast3, bcast_shape)
@parameterized.parameters(
[
# Broadcast scalar
dict(x_dims=[], y_dims=[], expected_dims=[]),
dict(x_dims=[], y_dims=[2], expected_dims=[2]),
dict(x_dims=[], y_dims=[2, 3], expected_dims=[2, 3]),
dict(
x_dims=[],
y_dims=[2, (2, 3), (5, 7, 2, 0, 9)],
expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]),
# Broadcast vector
dict(x_dims=[3], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]),
dict(x_dims=[1], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]),
dict(x_dims=[3], y_dims=[4, 2, 1], expected_dims=[4, 2, 3]),
dict(
x_dims=[3],
y_dims=[3, (2, 3, 1), 1],
expected_dims=[3, (2, 3, 1), 3]),
dict(x_dims=[1], y_dims=[3, (2, 1, 3)], expected_dims=[3, (2, 1, 3)]),
dict(
x_dims=[1],
y_dims=[3, (2, 1, 3), 8],
expected_dims=[3, (2, 1, 3), 8]),
dict(
x_dims=[1],
y_dims=[2, (2, 3), (5, 7, 2, 0, 9)],
expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]),
# Mixed broadcasting
dict(
x_dims=[
1, # axis=0
3, # axis=1
(3, 0, 2), # axis=2
1, # axis=3
2, # axis=4
],
y_dims=[
2, # axis=0
1, # axis=1
1, # axis=2
(7, 2), # axis=3
1, # axis=4
],
expected_dims=[
2, # axis=0
3, # axis=1
(3, 0, 2, 3, 0, 2), # axis=2
(7, 7, 7, 7, 7, 2, 2, 2, 2, 2), # axis=3
2, # axis=4
]),
dict(
x_dims=[2, (2, 1), 2, 1],
y_dims=[1, 1, 2, (2, 1)],
expected_dims=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
])
def testBroadcastDynamicShape(self, x_dims, y_dims, expected_dims):
x_shape = RaggedTensorDynamicShape.from_dim_sizes(x_dims)
y_shape = RaggedTensorDynamicShape.from_dim_sizes(y_dims)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dims)
result1 = ragged_tensor_shape.broadcast_dynamic_shape(x_shape, y_shape)
result2 = ragged_tensor_shape.broadcast_dynamic_shape(y_shape, x_shape)
self.assertShapeEq(expected, result1)
self.assertShapeEq(expected, result2)
def testRepr(self):
shape = RaggedTensorDynamicShape.from_dim_sizes([2, (2, 1), 2, 1])
self.assertRegex(
repr(shape), r'RaggedTensorDynamicShape\('
r'partitioned_dim_sizes=\(<[^>]+>, <[^>]+>\), '
r'inner_dim_sizes=<[^>]+>\)')
@parameterized.parameters([
dict(
x=[[10], [20], [30]], # shape=[3, 1]
dim_sizes=[3, 2],
expected=[[10, 10], [20, 20], [30, 30]]),
dict(
x=[[10], [20], [30]], # shape=[3, 1]
dim_sizes=[3, [3, 0, 2]],
expected=ragged_factory_ops.constant_value(
[[10, 10, 10], [], [30, 30]], dtype=np.int32)),
dict(
x=[[[1, 2, 3]], [[4, 5, 6]]], # shape = [2, 1, 3]
dim_sizes=[2, [2, 3], 3],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3], [1, 2, 3]], [[4, 5, 6], [4, 5, 6], [4, 5, 6]]],
dtype=np.int32,
ragged_rank=1)),
dict(
x=[[[1]], [[2]]], # shape = [2, 1, 1]
dim_sizes=[2, [2, 3], [0, 2, 1, 2, 0]],
expected=ragged_factory_ops.constant_value(
[[[], [1, 1]], [[2], [2, 2], []]], dtype=np.int32,
ragged_rank=2)),
dict(
x=10,
dim_sizes=[3, [3, 0, 2]],
expected=ragged_factory_ops.constant_value([[10, 10, 10], [],
[10, 10]])),
dict(
x=ragged_factory_ops.constant_value([[[1], [2]], [[3]]],
ragged_rank=1),
dim_sizes=[2, [2, 1], 2],
expected=ragged_factory_ops.constant_value(
[[[1, 1], [2, 2]], [[3, 3]]], ragged_rank=1)),
])
def testRaggedBroadcastTo(self, x, dim_sizes, expected):
shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
result = ragged_tensor_shape.broadcast_to(x, shape)
self.assertEqual(
getattr(result, 'ragged_rank', 0), getattr(expected, 'ragged_rank', 0))
self.assertAllEqual(result, expected)
@parameterized.parameters(
[
dict(
doc='x.shape=[3, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]],
dtype=np.int32),
y=[[10], [20], [30]],
expected=ragged_factory_ops.constant_value([[11, 12, 13], [],
[34, 35]])),
dict(
doc='x.shape=[3, (D1)]; y.shape=[]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]],
dtype=np.int32),
y=10,
expected=ragged_factory_ops.constant_value([[11, 12, 13], [],
[14, 15]])),
dict(
doc='x.shape=[1, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3]], dtype=np.int32),
y=[[10], [20], [30]],
expected=ragged_factory_ops.constant_value(
[[11, 12, 13], [21, 22, 23], [31, 32, 33]], dtype=np.int32)),
dict(
doc=('x.shape=[2, (D1), 1]; y.shape=[1, (D2)]; '
'bcast.shape=[2, (D1), (D2)]'),
x=ragged_factory_ops.constant_value([[[1], [2], [3]], [[4]]],
ragged_rank=1),
y=ragged_factory_ops.constant_value([[10, 20, 30]]),
expected=ragged_factory_ops.constant_value([[[11, 21, 31],
[12, 22, 32],
[13, 23, 33]],
[[14, 24, 34]]])),
dict(
doc=('x.shape=[2, (D1), 1]; y.shape=[1, 1, 4]; '
'bcast.shape=[2, (D1), 4]'),
x=ragged_factory_ops.constant_value([[[10], [20]], [[30]]],
ragged_rank=1),
y=[[[1, 2, 3, 4]]],
expected=ragged_factory_ops.constant_value(
[[[11, 12, 13, 14], [21, 22, 23, 24]], [[31, 32, 33, 34]]],
ragged_rank=1)),
dict(
doc=('x.shape=[2, (D1), 2, 1]; y.shape=[2, (D2)]; '
'bcast.shape=[2, (D1), (2), (D2)'),
x=ragged_factory_ops.constant_value(
[[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1),
y=ragged_factory_ops.constant_value([[10, 20], [30]]),
expected=ragged_factory_ops.constant_value([[[[11, 21], [32]],
[[13, 23], [34]]],
[[[15, 25], [36]]]])),
])
def testRaggedAddWithBroadcasting(self, x, y, expected, doc):
expected_rrank = getattr(expected, 'ragged_rank', 0)
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
result_rrank = getattr(result, 'ragged_rank', 0)
self.assertEqual(expected_rrank, result_rrank)
if hasattr(expected, 'tolist'):
expected = expected.tolist()
self.assertAllEqual(result, expected)
if __name__ == '__main__':
googletest.main()
| RaggedTensorShapeTest |
python | sympy__sympy | sympy/series/fourier.py | {
"start": 3947,
"end": 12746
} | class ____(SeriesBase):
r"""Represents Fourier sine/cosine series.
Explanation
===========
This class only represents a fourier series.
No computation is performed.
For how to compute Fourier series, see the :func:`fourier_series`
docstring.
See Also
========
sympy.series.fourier.fourier_series
"""
def __new__(cls, *args):
args = map(sympify, args)
return Expr.__new__(cls, *args)
@property
def function(self):
return self.args[0]
@property
def x(self):
return self.args[1][0]
@property
def period(self):
return (self.args[1][1], self.args[1][2])
@property
def a0(self):
return self.args[2][0]
@property
def an(self):
return self.args[2][1]
@property
def bn(self):
return self.args[2][2]
@property
def interval(self):
return Interval(0, oo)
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return oo
@property
def L(self):
return abs(self.period[1] - self.period[0]) / 2
def _eval_subs(self, old, new):
x = self.x
if old.has(x):
return self
def truncate(self, n=3):
"""
Return the first n nonzero terms of the series.
If ``n`` is None return an iterator.
Parameters
==========
n : int or None
Amount of non-zero terms in approximation or None.
Returns
=======
Expr or iterator :
Approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.truncate(4)
2*sin(x) - sin(2*x) + 2*sin(3*x)/3 - sin(4*x)/2
See Also
========
sympy.series.fourier.FourierSeries.sigma_approximation
"""
if n is None:
return iter(self)
terms = []
for t in self:
if len(terms) == n:
break
if t is not S.Zero:
terms.append(t)
return Add(*terms)
def sigma_approximation(self, n=3):
r"""
Return :math:`\sigma`-approximation of Fourier series with respect
to order n.
Explanation
===========
Sigma approximation adjusts a Fourier summation to eliminate the Gibbs
phenomenon which would otherwise occur at discontinuities.
A sigma-approximated summation for a Fourier series of a T-periodical
function can be written as
.. math::
s(\theta) = \frac{1}{2} a_0 + \sum _{k=1}^{m-1}
\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr) \cdot
\left[ a_k \cos \Bigl( \frac{2\pi k}{T} \theta \Bigr)
+ b_k \sin \Bigl( \frac{2\pi k}{T} \theta \Bigr) \right],
where :math:`a_0, a_k, b_k, k=1,\ldots,{m-1}` are standard Fourier
series coefficients and
:math:`\operatorname{sinc} \Bigl( \frac{k}{m} \Bigr)` is a Lanczos
:math:`\sigma` factor (expressed in terms of normalized
:math:`\operatorname{sinc}` function).
Parameters
==========
n : int
Highest order of the terms taken into account in approximation.
Returns
=======
Expr :
Sigma approximation of function expanded into Fourier series.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x, (x, -pi, pi))
>>> s.sigma_approximation(4)
2*sin(x)*sinc(pi/4) - 2*sin(2*x)/pi + 2*sin(3*x)*sinc(3*pi/4)/3
See Also
========
sympy.series.fourier.FourierSeries.truncate
Notes
=====
The behaviour of
:meth:`~sympy.series.fourier.FourierSeries.sigma_approximation`
is different from :meth:`~sympy.series.fourier.FourierSeries.truncate`
- it takes all nonzero terms of degree smaller than n, rather than
first n nonzero ones.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gibbs_phenomenon
.. [2] https://en.wikipedia.org/wiki/Sigma_approximation
"""
terms = [sinc(pi * i / n) * t for i, t in enumerate(self[:n])
if t is not S.Zero]
return Add(*terms)
def shift(self, s):
"""
Shift the function by a term independent of x.
Explanation
===========
f(x) -> f(x) + s
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shift(1).truncate()
-4*cos(x) + cos(2*x) + 1 + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
a0 = self.a0 + s
sfunc = self.function + s
return self.func(sfunc, self.args[1], (a0, self.an, self.bn))
def shiftx(self, s):
"""
Shift x by a term independent of x.
Explanation
===========
f(x) -> f(x + s)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.shiftx(1).truncate()
-4*cos(x + 1) + cos(2*x + 2) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x + s)
bn = self.bn.subs(x, x + s)
sfunc = self.function.subs(x, x + s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def scale(self, s):
"""
Scale the function by a term independent of x.
Explanation
===========
f(x) -> s * f(x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scale(2).truncate()
-8*cos(x) + 2*cos(2*x) + 2*pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.coeff_mul(s)
bn = self.bn.coeff_mul(s)
a0 = self.a0 * s
sfunc = self.args[0] * s
return self.func(sfunc, self.args[1], (a0, an, bn))
def scalex(self, s):
"""
Scale x by a term independent of x.
Explanation
===========
f(x) -> f(s*x)
This is fast, if Fourier series of f(x) is already
computed.
Examples
========
>>> from sympy import fourier_series, pi
>>> from sympy.abc import x
>>> s = fourier_series(x**2, (x, -pi, pi))
>>> s.scalex(2).truncate()
-4*cos(2*x) + cos(4*x) + pi**2/3
"""
s, x = sympify(s), self.x
if x in s.free_symbols:
raise ValueError("'%s' should be independent of %s" % (s, x))
an = self.an.subs(x, x * s)
bn = self.bn.subs(x, x * s)
sfunc = self.function.subs(x, x * s)
return self.func(sfunc, self.args[1], (self.a0, an, bn))
def _eval_as_leading_term(self, x, logx, cdir):
for t in self:
if t is not S.Zero:
return t
def _eval_term(self, pt):
if pt == 0:
return self.a0
return self.an.coeff(pt) + self.bn.coeff(pt)
def __neg__(self):
return self.scale(-1)
def __add__(self, other):
if isinstance(other, FourierSeries):
if self.period != other.period:
raise ValueError("Both the series should have same periods")
x, y = self.x, other.x
function = self.function + other.function.subs(y, x)
if self.x not in function.free_symbols:
return function
an = self.an + other.an
bn = self.bn + other.bn
a0 = self.a0 + other.a0
return self.func(function, self.args[1], (a0, an, bn))
return Add(self, other)
def __sub__(self, other):
return self.__add__(-other)
| FourierSeries |
python | huggingface__transformers | src/transformers/trainer_callback.py | {
"start": 10340,
"end": 12795
} | class ____(ExportableState):
"""
A class that handles the [`Trainer`] control flow. This class is used by the [`TrainerCallback`] to activate some
switches in the training loop.
Args:
should_training_stop (`bool`, *optional*, defaults to `False`):
Whether or not the training should be interrupted.
If `True`, this variable will not be set back to `False`. The training will just stop.
should_epoch_stop (`bool`, *optional*, defaults to `False`):
Whether or not the current epoch should be interrupted.
If `True`, this variable will be set back to `False` at the beginning of the next epoch.
should_save (`bool`, *optional*, defaults to `False`):
Whether or not the model should be saved at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_evaluate (`bool`, *optional*, defaults to `False`):
Whether or not the model should be evaluated at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
should_log (`bool`, *optional*, defaults to `False`):
Whether or not the logs should be reported at this step.
If `True`, this variable will be set back to `False` at the beginning of the next step.
"""
should_training_stop: bool = False
should_epoch_stop: bool = False
should_save: bool = False
should_evaluate: bool = False
should_log: bool = False
def _new_training(self):
"""Internal method that resets the variable for a new training."""
self.should_training_stop = False
def _new_epoch(self):
"""Internal method that resets the variable for a new epoch."""
self.should_epoch_stop = False
def _new_step(self):
"""Internal method that resets the variable for a new step."""
self.should_save = False
self.should_evaluate = False
self.should_log = False
def state(self) -> dict:
return {
"args": {
"should_training_stop": self.should_training_stop,
"should_epoch_stop": self.should_epoch_stop,
"should_save": self.should_save,
"should_evaluate": self.should_evaluate,
"should_log": self.should_log,
},
"attributes": {},
}
| TrainerControl |
python | kamyu104__LeetCode-Solutions | Python/subsequence-sum-after-capping-elements.py | {
"start": 89,
"end": 795
} | class ____(object):
def subsequenceSumAfterCapping(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[bool]
"""
result = [False]*len(nums)
nums.sort()
mask = (1<<(k+1))-1
dp = 1
i = 0
for x in xrange(1, len(nums)+1):
while i < len(nums) and nums[i] < x:
dp |= (dp<<nums[i])&mask
i += 1
for j in xrange(max(k%x, k-(len(nums)-i)*x), k+1, x):
if dp&(1<<j):
result[x-1] = True
break
return result
# Time: O(nlogn + n * k + klogn) = O(nlogn + n * k)
# Space: O(k)
# sort, dp
| Solution |
python | kamyu104__LeetCode-Solutions | Python/count-number-of-pairs-with-absolute-difference-k.py | {
"start": 50,
"end": 495
} | class ____(object):
def countKDifference(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
lookup = collections.defaultdict(int)
result = 0
for x in nums:
if x-k in lookup:
result += lookup[x-k]
if x+k in lookup:
result += lookup[x+k]
lookup[x] += 1
return result
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_s3_to_gcs.py | {
"start": 2813,
"end": 11857
} | class ____:
def test_init(self):
"""Test S3ToGCSOperator instance is properly initialized."""
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
apply_gcs_prefix=APPLY_GCS_PREFIX,
deferrable=DEFERRABLE,
poll_interval=POLL_INTERVAL,
)
assert operator.task_id == TASK_ID
assert operator.bucket == S3_BUCKET
assert operator.prefix == S3_PREFIX
assert operator.delimiter == S3_DELIMITER
assert operator.gcp_conn_id == GCS_CONN_ID
assert operator.dest_gcs == GCS_PATH_PREFIX
assert operator.google_impersonation_chain == IMPERSONATION_CHAIN
assert operator.apply_gcs_prefix == APPLY_GCS_PREFIX
assert operator.deferrable == DEFERRABLE
assert operator.poll_interval == POLL_INTERVAL
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_execute(self, gcs_mock_hook, s3_mock_hook):
"""Test the execute function when the run is successful."""
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
operator.hook = mock.MagicMock()
operator.hook.list_keys.return_value = MOCK_FILES
uploaded_files = operator.execute(context={})
gcs_mock_hook.return_value.upload.assert_has_calls(
[
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_1, mock.ANY, gzip=False),
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_2, mock.ANY, gzip=False),
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_3, mock.ANY, gzip=False),
],
any_order=True,
)
operator.hook.list_keys.assert_called_once()
s3_mock_hook.assert_called_once_with(aws_conn_id=AWS_CONN_ID, verify=None)
gcs_mock_hook.assert_called_once_with(
gcp_conn_id=GCS_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
# we expect MOCK_FILES to be uploaded
assert sorted(MOCK_FILES) == sorted(uploaded_files)
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_execute_with_gzip(self, gcs_mock_hook, s3_mock_hook):
"""Test the execute function when the run is successful."""
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
gzip=True,
)
operator.hook = mock.MagicMock()
operator.hook.list_keys.return_value = MOCK_FILES
operator.execute(context={})
gcs_mock_hook.assert_called_once_with(
gcp_conn_id=GCS_CONN_ID,
impersonation_chain=None,
)
gcs_mock_hook.return_value.upload.assert_has_calls(
[
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_1, mock.ANY, gzip=True),
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_2, mock.ANY, gzip=True),
mock.call(GCS_BUCKET, GCS_PREFIX + MOCK_FILE_3, mock.ANY, gzip=True),
],
any_order=True,
)
@pytest.mark.parametrize(
("source_objects", "existing_objects", "objects_expected"),
[
(MOCK_FILES, [], MOCK_FILES),
(MOCK_FILES, [MOCK_FILE_1], [MOCK_FILE_2, MOCK_FILE_3]),
(MOCK_FILES, [MOCK_FILE_1, MOCK_FILE_2], [MOCK_FILE_3]),
(MOCK_FILES, [MOCK_FILE_3, MOCK_FILE_2], [MOCK_FILE_1]),
(MOCK_FILES, MOCK_FILES, []),
],
)
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_exclude_existing_objects(
self, mock_gcs_hook, source_objects, existing_objects, objects_expected
):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=S3_PREFIX,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=GCS_PATH_PREFIX,
gzip=True,
)
mock_gcs_hook.list.return_value = existing_objects
files_reduced = operator.exclude_existing_objects(s3_objects=source_objects, gcs_hook=mock_gcs_hook)
assert set(files_reduced) == set(objects_expected)
@pytest.mark.parametrize(*PARAMETRIZED_OBJECT_PATHS)
def test_s3_to_gcs_object(self, apply_gcs_prefix, s3_prefix, s3_object, gcs_destination, gcs_object):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=s3_prefix,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=gcs_destination,
gzip=True,
apply_gcs_prefix=apply_gcs_prefix,
)
assert operator.s3_to_gcs_object(s3_object=s3_prefix + s3_object) == gcs_object
@pytest.mark.parametrize(*PARAMETRIZED_OBJECT_PATHS)
def test_gcs_to_s3_object(self, apply_gcs_prefix, s3_prefix, s3_object, gcs_destination, gcs_object):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=s3_prefix,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=gcs_destination,
gzip=True,
apply_gcs_prefix=apply_gcs_prefix,
)
assert operator.gcs_to_s3_object(gcs_object=gcs_object) == s3_prefix + s3_object
@pytest.mark.parametrize(*PARAMETRIZED_OBJECT_PATHS)
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.S3Hook")
@mock.patch("airflow.providers.google.cloud.transfers.s3_to_gcs.GCSHook")
def test_execute_apply_gcs_prefix(
self,
gcs_mock_hook,
s3_mock_hook,
apply_gcs_prefix,
s3_prefix,
s3_object,
gcs_destination,
gcs_object,
):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=s3_prefix,
delimiter=S3_DELIMITER,
gcp_conn_id=GCS_CONN_ID,
dest_gcs=gcs_destination,
google_impersonation_chain=IMPERSONATION_CHAIN,
apply_gcs_prefix=apply_gcs_prefix,
)
operator.hook = mock.MagicMock()
operator.hook.list_keys.return_value = [s3_prefix + s3_object]
uploaded_files = operator.execute(context={})
gcs_mock_hook.return_value.upload.assert_has_calls(
[
mock.call(GCS_BUCKET, gcs_object, mock.ANY, gzip=False),
],
any_order=True,
)
operator.hook.list_keys.assert_called_once()
s3_mock_hook.assert_called_once_with(aws_conn_id=AWS_CONN_ID, verify=None)
gcs_mock_hook.assert_called_once_with(
gcp_conn_id=GCS_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
assert sorted([s3_prefix + s3_object]) == sorted(uploaded_files)
@pytest.mark.parametrize(
("s3_prefix", "gcs_destination", "apply_gcs_prefix", "expected_input", "expected_output"),
[
("dir/pre", "gs://bucket/dest_dir/", False, "dir/pre", "dest_dir/dir"),
("dir/pre/", "gs://bucket/dest_dir/", False, "dir/pre", "dest_dir/dir/pre"),
("dir/pre", "gs://bucket/dest_dir/", True, "dir/pre", "dest_dir"),
("dir/pre", "gs://bucket/", False, "dir/pre", "dir"),
("dir/pre", "gs://bucket/", True, "dir/pre", "/"),
("", "gs://bucket/", False, "/", "/"),
("", "gs://bucket/", True, "/", "/"),
],
)
def test_get_openlineage_facets_on_start(
self, s3_prefix, gcs_destination, apply_gcs_prefix, expected_input, expected_output
):
operator = S3ToGCSOperator(
task_id=TASK_ID,
bucket=S3_BUCKET,
prefix=s3_prefix,
dest_gcs=gcs_destination,
apply_gcs_prefix=apply_gcs_prefix,
)
result = operator.get_openlineage_facets_on_start()
assert not result.job_facets
assert not result.run_facets
assert len(result.outputs) == 1
assert len(result.inputs) == 1
assert result.outputs[0].namespace == "gs://bucket"
assert result.outputs[0].name == expected_output
assert result.inputs[0].namespace == f"s3://{S3_BUCKET}"
assert result.inputs[0].name == expected_input
| TestS3ToGoogleCloudStorageOperator |
python | pypa__warehouse | tests/unit/tuf/test_tuf.py | {
"start": 129,
"end": 2560
} | class ____:
server = "rstuf.api"
task_id = "123456"
def test_get_task_state(self, monkeypatch):
state = "SUCCESS"
resp_json = {"data": {"state": state}}
resp = stub(
raise_for_status=(lambda *a: None), json=(lambda *a, **kw: resp_json)
)
get = call_recorder(lambda *a: resp)
monkeypatch.setattr(tuf.requests, "get", get)
result = tuf.get_task_state(self.server, self.task_id)
assert result == state
assert get.calls == [call(f"{self.server}/api/v1/task?task_id={self.task_id}")]
def test_post_bootstrap(self, monkeypatch):
payload = ["foo"]
resp_json = {"data": {"task_id": self.task_id}}
resp = stub(
raise_for_status=(lambda *a: None), json=(lambda *a, **kw: resp_json)
)
post = call_recorder(lambda *a, **kw: resp)
monkeypatch.setattr(tuf.requests, "post", post)
# Test success
result = tuf.post_bootstrap(self.server, payload)
assert result == self.task_id
assert post.calls == [call(f"{self.server}/api/v1/bootstrap", json=payload)]
# Test fail with incomplete response json
del resp_json["data"]
with pytest.raises(tuf.RSTUFError):
tuf.post_bootstrap(self.server, payload)
def test_wait_for_success(self, monkeypatch):
get_task_state = call_recorder(lambda *a: "SUCCESS")
monkeypatch.setattr(tuf, "get_task_state", get_task_state)
tuf.wait_for_success(self.server, self.task_id)
assert get_task_state.calls == [call(self.server, self.task_id)]
@pytest.mark.parametrize(
("state", "iterations"),
[
("PENDING", 20),
("RUNNING", 20),
("RECEIVED", 20),
("STARTED", 20),
("FAILURE", 1),
("ERRORED", 1),
("REVOKED", 1),
("REJECTED", 1),
("bogus", 1),
],
)
def test_wait_for_success_error(self, state, iterations, monkeypatch):
monkeypatch.setattr(tuf.time, "sleep", lambda *a: None)
get_task_state = call_recorder(lambda *a: state)
monkeypatch.setattr(tuf, "get_task_state", get_task_state)
with pytest.raises(tuf.RSTUFError):
tuf.wait_for_success(self.server, self.task_id)
assert get_task_state.calls == [call(self.server, self.task_id)] * iterations
| TestTUF |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar4.py | {
"start": 308,
"end": 1408
} | class ____(Generic[_T, _T_co, _T_contra]):
def func1(self, a: _T):
pass
# This should generate an error because covariant
# TypeVars are not allowed for input parameters.
def func2(self, a: _T_co):
def inner(b: _T_co) -> None:
pass
return inner
def func3(self, a: int | _T_co):
pass
def func4(self, a: list[_T_co]):
pass
def func5(self, a: _T_contra):
pass
def func6(self) -> _T | None:
pass
def func7(self) -> _T_co | None:
pass
# This should generate an error because contravariant
# TypeVars are not allowed for return parameters.
def func8(self) -> _T_contra: ...
# This should generate an error because contravariant
# TypeVars are not allowed for return parameters.
def func9(self) -> _T_contra | int:
return 3
# This should generate an error because contravariant
# TypeVars are not allowed for return parameters.
def func10(self, x: _T_contra):
return x
def func11(self) -> list[_T_contra]:
return []
| ClassA |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol8.py | {
"start": 140,
"end": 303
} | class ____(Protocol):
def __call__(self, *args: Any, kwarg0: Any, kwarg1: Any) -> None: ...
def f(*args: Any, kwarg0: Any, kwarg1: Any) -> None: ...
p: P = f
| P |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/core.py | {
"start": 4765,
"end": 28274
} | class ____:
"""
Add an explicit input to a Hypothesis test, which Hypothesis will always
try before generating random inputs. This combines the randomized nature of
Hypothesis generation with a traditional parametrized test.
For example:
.. code-block:: python
@example("Hello world")
@example("some string with special significance")
@given(st.text())
def test_strings(s):
pass
will call ``test_strings("Hello World")`` and
``test_strings("some string with special significance")`` before generating
any random inputs. |@example| may be placed in any order relative to |@given|
and |@settings|.
Explicit inputs from |@example| are run in the |Phase.explicit| phase.
Explicit inputs do not count towards |settings.max_examples|. Note that
explicit inputs added by |@example| do not shrink. If an explicit input
fails, Hypothesis will stop and report the failure without generating any
random inputs.
|@example| can also be used to easily reproduce a failure. For instance, if
Hypothesis reports that ``f(n=[0, math.nan])`` fails, you can add
``@example(n=[0, math.nan])`` to your test to quickly reproduce that failure.
Arguments to ``@example``
-------------------------
Arguments to |@example| have the same behavior and restrictions as arguments
to |@given|. This means they may be either positional or keyword arguments
(but not both in the same |@example|):
.. code-block:: python
@example(1, 2)
@example(x=1, y=2)
@given(st.integers(), st.integers())
def test(x, y):
pass
Noting that while arguments to |@given| are strategies (like |st.integers|),
arguments to |@example| are values instead (like ``1``).
See the :ref:`given-arguments` section for full details.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if args and kwargs:
raise InvalidArgument(
"Cannot mix positional and keyword arguments for examples"
)
if not (args or kwargs):
raise InvalidArgument("An example must provide at least one argument")
self.hypothesis_explicit_examples: list[Example] = []
self._this_example = Example(tuple(args), kwargs)
def __call__(self, test: TestFunc) -> TestFunc:
if not hasattr(test, "hypothesis_explicit_examples"):
test.hypothesis_explicit_examples = self.hypothesis_explicit_examples # type: ignore
test.hypothesis_explicit_examples.append(self._this_example) # type: ignore
return test
def xfail(
self,
condition: bool = True, # noqa: FBT002
*,
reason: str = "",
raises: type[BaseException] | tuple[type[BaseException], ...] = BaseException,
) -> "example":
"""Mark this example as an expected failure, similarly to
:obj:`pytest.mark.xfail(strict=True) <pytest.mark.xfail>`.
Expected-failing examples allow you to check that your test does fail on
some examples, and therefore build confidence that *passing* tests are
because your code is working, not because the test is missing something.
.. code-block:: python
@example(...).xfail()
@example(...).xfail(reason="Prices must be non-negative")
@example(...).xfail(raises=(KeyError, ValueError))
@example(...).xfail(sys.version_info[:2] >= (3, 12), reason="needs py 3.12")
@example(...).xfail(condition=sys.platform != "linux", raises=OSError)
def test(x):
pass
.. note::
Expected-failing examples are handled separately from those generated
by strategies, so you should usually ensure that there is no overlap.
.. code-block:: python
@example(x=1, y=0).xfail(raises=ZeroDivisionError)
@given(x=st.just(1), y=st.integers()) # Missing `.filter(bool)`!
def test_fraction(x, y):
# This test will try the explicit example and see it fail as
# expected, then go on to generate more examples from the
# strategy. If we happen to generate y=0, the test will fail
# because only the explicit example is treated as xfailing.
x / y
"""
check_type(bool, condition, "condition")
check_type(str, reason, "reason")
if not (
isinstance(raises, type) and issubclass(raises, BaseException)
) and not (
isinstance(raises, tuple)
and raises # () -> expected to fail with no error, which is impossible
and all(
isinstance(r, type) and issubclass(r, BaseException) for r in raises
)
):
raise InvalidArgument(
f"{raises=} must be an exception type or tuple of exception types"
)
if condition:
self._this_example = dataclasses.replace(
self._this_example, raises=raises, reason=reason
)
return self
def via(self, whence: str, /) -> "example":
"""Attach a machine-readable label noting what the origin of this example
was. |example.via| is completely optional and does not change runtime
behavior.
|example.via| is intended to support self-documenting behavior, as well as
tooling which might add (or remove) |@example| decorators automatically.
For example:
.. code-block:: python
# Annotating examples is optional and does not change runtime behavior
@example(...)
@example(...).via("regression test for issue #42")
@example(...).via("discovered failure")
def test(x):
pass
.. note::
`HypoFuzz <https://hypofuzz.com/>`_ uses |example.via| to tag examples
in the patch of its high-coverage set of explicit inputs, on
`the patches page <https://hypofuzz.com/example-dashboard/#/patches>`_.
"""
if not isinstance(whence, str):
raise InvalidArgument(".via() must be passed a string")
# This is deliberately a no-op at runtime; the tools operate on source code.
return self
def seed(seed: Hashable) -> Callable[[TestFunc], TestFunc]:
"""
Seed the randomness for this test.
``seed`` may be any hashable object. No exact meaning for ``seed`` is provided
other than that for a fixed seed value Hypothesis will produce the same
examples (assuming that there are no other sources of nondeterminisim, such
as timing, hash randomization, or external state).
For example, the following test function and |RuleBasedStateMachine| will
each generate the same series of examples each time they are executed:
.. code-block:: python
@seed(1234)
@given(st.integers())
def test(n): ...
@seed(6789)
class MyMachine(RuleBasedStateMachine): ...
If using pytest, you can alternatively pass ``--hypothesis-seed`` on the
command line.
Setting a seed overrides |settings.derandomize|, which is designed to enable
deterministic CI tests rather than reproducing observed failures.
Hypothesis will only print the seed which would reproduce a failure if a test
fails in an unexpected way, for instance inside Hypothesis internals.
"""
def accept(test):
test._hypothesis_internal_use_seed = seed
current_settings = getattr(test, "_hypothesis_internal_use_settings", None)
test._hypothesis_internal_use_settings = Settings(
current_settings, database=None
)
return test
return accept
# TODO_DOCS: link to /explanation/choice-sequence
def reproduce_failure(version: str, blob: bytes) -> Callable[[TestFunc], TestFunc]:
"""
Run the example corresponding to the binary ``blob`` in order to reproduce a
failure. ``blob`` is a serialized version of the internal input representation
of Hypothesis.
A test decorated with |@reproduce_failure| always runs exactly one example,
which is expected to cause a failure. If the provided ``blob`` does not
cause a failure, Hypothesis will raise |DidNotReproduce|.
Hypothesis will print an |@reproduce_failure| decorator if
|settings.print_blob| is ``True`` (which is the default in CI).
|@reproduce_failure| is intended to be temporarily added to your test suite in
order to reproduce a failure. It is not intended to be a permanent addition to
your test suite. Because of this, no compatibility guarantees are made across
Hypothesis versions, and |@reproduce_failure| will error if used on a different
Hypothesis version than it was created for.
.. seealso::
See also the :doc:`/tutorial/replaying-failures` tutorial.
"""
def accept(test):
test._hypothesis_internal_use_reproduce_failure = (version, blob)
return test
return accept
def reproduction_decorator(choices: Iterable[ChoiceT]) -> str:
return f"@reproduce_failure({__version__!r}, {encode_failure(choices)!r})"
def encode_failure(choices: Iterable[ChoiceT]) -> bytes:
blob = choices_to_bytes(choices)
compressed = zlib.compress(blob)
if len(compressed) < len(blob):
blob = b"\1" + compressed
else:
blob = b"\0" + blob
return base64.b64encode(blob)
def decode_failure(blob: bytes) -> Sequence[ChoiceT]:
try:
decoded = base64.b64decode(blob)
except Exception:
raise InvalidArgument(f"Invalid base64 encoded string: {blob!r}") from None
prefix = decoded[:1]
if prefix == b"\0":
decoded = decoded[1:]
elif prefix == b"\1":
try:
decoded = zlib.decompress(decoded[1:])
except zlib.error as err:
raise InvalidArgument(
f"Invalid zlib compression for blob {blob!r}"
) from err
else:
raise InvalidArgument(
f"Could not decode blob {blob!r}: Invalid start byte {prefix!r}"
)
choices = choices_from_bytes(decoded)
if choices is None:
raise InvalidArgument(f"Invalid serialized choice sequence for blob {blob!r}")
return choices
def _invalid(message, *, exc=InvalidArgument, test, given_kwargs):
@impersonate(test)
def wrapped_test(*arguments, **kwargs): # pragma: no cover # coverage limitation
raise exc(message)
wrapped_test.is_hypothesis_test = True
wrapped_test.hypothesis = HypothesisHandle(
inner_test=test,
_get_fuzz_target=wrapped_test,
_given_kwargs=given_kwargs,
)
return wrapped_test
def is_invalid_test(test, original_sig, given_arguments, given_kwargs):
"""Check the arguments to ``@given`` for basic usage constraints.
Most errors are not raised immediately; instead we return a dummy test
function that will raise the appropriate error if it is actually called.
When the user runs a subset of tests (e.g via ``pytest -k``), errors will
only be reported for tests that actually ran.
"""
invalid = partial(_invalid, test=test, given_kwargs=given_kwargs)
if not (given_arguments or given_kwargs):
return invalid("given must be called with at least one argument")
params = list(original_sig.parameters.values())
pos_params = [p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD]
kwonly_params = [p for p in params if p.kind is p.KEYWORD_ONLY]
if given_arguments and params != pos_params:
return invalid(
"positional arguments to @given are not supported with varargs, "
"varkeywords, positional-only, or keyword-only arguments"
)
if len(given_arguments) > len(pos_params):
return invalid(
f"Too many positional arguments for {test.__name__}() were passed to "
f"@given - expected at most {len(pos_params)} "
f"arguments, but got {len(given_arguments)} {given_arguments!r}"
)
if ... in given_arguments:
return invalid(
"... was passed as a positional argument to @given, but may only be "
"passed as a keyword argument or as the sole argument of @given"
)
if given_arguments and given_kwargs:
return invalid("cannot mix positional and keyword arguments to @given")
extra_kwargs = [
k for k in given_kwargs if k not in {p.name for p in pos_params + kwonly_params}
]
if extra_kwargs and (params == [] or params[-1].kind is not params[-1].VAR_KEYWORD):
arg = extra_kwargs[0]
extra = ""
if arg in all_settings:
extra = f". Did you mean @settings({arg}={given_kwargs[arg]!r})?"
return invalid(
f"{test.__name__}() got an unexpected keyword argument {arg!r}, "
f"from `{arg}={given_kwargs[arg]!r}` in @given{extra}"
)
if any(p.default is not p.empty for p in params):
return invalid("Cannot apply @given to a function with defaults.")
# This case would raise Unsatisfiable *anyway*, but by detecting it here we can
# provide a much more helpful error message for people e.g. using the Ghostwriter.
empty = [
f"{s!r} (arg {idx})" for idx, s in enumerate(given_arguments) if s is NOTHING
] + [f"{name}={s!r}" for name, s in given_kwargs.items() if s is NOTHING]
if empty:
strats = "strategies" if len(empty) > 1 else "strategy"
return invalid(
f"Cannot generate examples from empty {strats}: " + ", ".join(empty),
exc=Unsatisfiable,
)
def execute_explicit_examples(state, wrapped_test, arguments, kwargs, original_sig):
assert isinstance(state, StateForActualGivenExecution)
posargs = [
p.name
for p in original_sig.parameters.values()
if p.kind is p.POSITIONAL_OR_KEYWORD
]
for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
assert isinstance(example, Example)
# All of this validation is to check that @example() got "the same" arguments
# as @given, i.e. corresponding to the same parameters, even though they might
# be any mixture of positional and keyword arguments.
if example.args:
assert not example.kwargs
if any(
p.kind is p.POSITIONAL_ONLY for p in original_sig.parameters.values()
):
raise InvalidArgument(
"Cannot pass positional arguments to @example() when decorating "
"a test function which has positional-only parameters."
)
if len(example.args) > len(posargs):
raise InvalidArgument(
"example has too many arguments for test. Expected at most "
f"{len(posargs)} but got {len(example.args)}"
)
example_kwargs = dict(
zip(posargs[-len(example.args) :], example.args, strict=True)
)
else:
example_kwargs = dict(example.kwargs)
given_kws = ", ".join(
repr(k) for k in sorted(wrapped_test.hypothesis._given_kwargs)
)
example_kws = ", ".join(repr(k) for k in sorted(example_kwargs))
if given_kws != example_kws:
raise InvalidArgument(
f"Inconsistent args: @given() got strategies for {given_kws}, "
f"but @example() got arguments for {example_kws}"
) from None
# This is certainly true because the example_kwargs exactly match the params
# reserved by @given(), which are then remove from the function signature.
assert set(example_kwargs).isdisjoint(kwargs)
example_kwargs.update(kwargs)
if Phase.explicit not in state.settings.phases:
continue
with local_settings(state.settings):
fragments_reported = []
empty_data = ConjectureData.for_choices([])
try:
execute_example = partial(
state.execute_once,
empty_data,
is_final=True,
print_example=True,
example_kwargs=example_kwargs,
)
with with_reporter(fragments_reported.append):
if example.raises is None:
execute_example()
else:
# @example(...).xfail(...)
bits = ", ".join(nicerepr(x) for x in arguments) + ", ".join(
f"{k}={nicerepr(v)}" for k, v in example_kwargs.items()
)
try:
execute_example()
except failure_exceptions_to_catch() as err:
if not isinstance(err, example.raises):
raise
# Save a string form of this example; we'll warn if it's
# ever generated by the strategy (which can't be xfailed)
state.xfail_example_reprs.add(
repr_call(state.test, arguments, example_kwargs)
)
except example.raises as err:
# We'd usually check this as early as possible, but it's
# possible for failure_exceptions_to_catch() to grow when
# e.g. pytest is imported between import- and test-time.
raise InvalidArgument(
f"@example({bits}) raised an expected {err!r}, "
"but Hypothesis does not treat this as a test failure"
) from err
else:
# Unexpectedly passing; always raise an error in this case.
reason = f" because {example.reason}" * bool(example.reason)
if example.raises is BaseException:
name = "exception" # special-case no raises= arg
elif not isinstance(example.raises, tuple):
name = example.raises.__name__
elif len(example.raises) == 1:
name = example.raises[0].__name__
else:
name = (
", ".join(ex.__name__ for ex in example.raises[:-1])
+ f", or {example.raises[-1].__name__}"
)
vowel = name.upper()[0] in "AEIOU"
raise AssertionError(
f"Expected a{'n' * vowel} {name} from @example({bits})"
f"{reason}, but no exception was raised."
)
except UnsatisfiedAssumption:
# Odd though it seems, we deliberately support explicit examples that
# are then rejected by a call to `assume()`. As well as iterative
# development, this is rather useful to replay Hypothesis' part of
# a saved failure when other arguments are supplied by e.g. pytest.
# See https://github.com/HypothesisWorks/hypothesis/issues/2125
with contextlib.suppress(StopTest):
empty_data.conclude_test(Status.INVALID)
except BaseException as err:
# In order to support reporting of multiple failing examples, we yield
# each of the (report text, error) pairs we find back to the top-level
# runner. This also ensures that user-facing stack traces have as few
# frames of Hypothesis internals as possible.
err = err.with_traceback(get_trimmed_traceback())
# One user error - whether misunderstanding or typo - we've seen a few
# times is to pass strategies to @example() where values are expected.
# Checking is easy, and false-positives not much of a problem, so:
if isinstance(err, failure_exceptions_to_catch()) and any(
isinstance(arg, SearchStrategy)
for arg in example.args + tuple(example.kwargs.values())
):
new = HypothesisWarning(
"The @example() decorator expects to be passed values, but "
"you passed strategies instead. See https://hypothesis."
"readthedocs.io/en/latest/reference/api.html#hypothesis"
".example for details."
)
new.__cause__ = err
err = new
with contextlib.suppress(StopTest):
empty_data.conclude_test(Status.INVALID)
yield (fragments_reported, err)
if (
state.settings.report_multiple_bugs
and pytest_shows_exceptiongroups
and isinstance(err, failure_exceptions_to_catch())
and not isinstance(err, skip_exceptions_to_reraise())
):
continue
break
finally:
if fragments_reported:
assert fragments_reported[0].startswith("Falsifying example")
fragments_reported[0] = fragments_reported[0].replace(
"Falsifying example", "Falsifying explicit example", 1
)
empty_data.freeze()
if observability_enabled():
tc = make_testcase(
run_start=state._start_timestamp,
property=state.test_identifier,
data=empty_data,
how_generated="explicit example",
representation=state._string_repr,
timing=state._timing_features,
)
deliver_observation(tc)
if fragments_reported:
verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
for f in fragments_reported[1:]:
verbose_report(f)
def get_random_for_wrapped_test(test, wrapped_test):
settings = wrapped_test._hypothesis_internal_use_settings
wrapped_test._hypothesis_internal_use_generated_seed = None
if wrapped_test._hypothesis_internal_use_seed is not None:
return Random(wrapped_test._hypothesis_internal_use_seed)
if settings.derandomize:
return Random(int_from_bytes(function_digest(test)))
if global_force_seed is not None:
return Random(global_force_seed)
if threadlocal._hypothesis_global_random is None: # pragma: no cover
threadlocal._hypothesis_global_random = Random()
seed = threadlocal._hypothesis_global_random.getrandbits(128)
wrapped_test._hypothesis_internal_use_generated_seed = seed
return Random(seed)
@dataclass(slots=True, frozen=False)
| example |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/_internal/templater.py | {
"start": 1445,
"end": 1879
} | class ____(ResolveMixin):
"""
A wrapper for a value that should be rendered as-is, without applying jinja templating to its contents.
:param value: The value to be rendered without templating
"""
value: Any
def iter_references(self) -> Iterable[tuple[Operator, str]]:
return ()
def resolve(self, context: Context) -> Any:
return self.value
log = logging.getLogger(__name__)
| LiteralValue |
python | mlflow__mlflow | tests/transformers/test_transformers_llm_inference_utils.py | {
"start": 1362,
"end": 2476
} | class ____:
def __call__(self, text: str, **kwargs):
input_ids = list(map(int, text.split(" ")))
return {"input_ids": torch.tensor([input_ids])}
def decode(self, tensor, **kwargs):
if isinstance(tensor, torch.Tensor):
tensor = tensor.tolist()
return " ".join([str(x) for x in tensor])
def convert_tokens_to_ids(self, tokens: list[str]):
return [int(x) for x in tokens]
def _tokenize(self, text: str):
return [x for x in text.split(" ") if x]
def apply_chat_template(self, messages: list[dict[str, str]], **kwargs):
return " ".join(message["content"] for message in messages)
def test_apply_chat_template():
data1 = [{"role": "A", "content": "one"}, {"role": "B", "content": "two"}]
# Test that the function modifies the data in place for Chat task
prompt = convert_messages_to_prompt(data1, DummyTokenizer())
assert prompt == "one two"
with pytest.raises(MlflowException, match=r"Input messages should be list of"):
convert_messages_to_prompt([["one", "two"]], DummyTokenizer())
| DummyTokenizer |
python | walkccc__LeetCode | solutions/3501. Maximize Active Section with Trade II/3501.py | {
"start": 696,
"end": 3641
} | class ____:
def maxActiveSectionsAfterTrade(
self,
s: str,
queries: list[list[int]]
) -> list[int]:
ones = s.count('1')
zeroGroups, zeroGroupIndex = self._getZeroGroups(s)
if not zeroGroups:
return [ones] * len(queries)
st = SparseTable(self._getZeroMergeLengths(zeroGroups))
def getMaxActiveSections(l: int, r: int) -> int:
left = (-1 if zeroGroupIndex[l] == -1
else (zeroGroups[zeroGroupIndex[l]].length -
(l - zeroGroups[zeroGroupIndex[l]].start)))
right = (-1 if zeroGroupIndex[r] == -1
else (r - zeroGroups[zeroGroupIndex[r]].start + 1))
startAdjacentGroupIndex, endAdjacentGroupIndex = self._mapToAdjacentGroupIndices(
zeroGroupIndex[l] + 1, zeroGroupIndex[r] if s[r] == '1' else zeroGroupIndex[r] - 1)
activeSections = ones
if (s[l] == '0' and s[r] == '0' and
zeroGroupIndex[l] + 1 == zeroGroupIndex[r]):
activeSections = max(activeSections, ones + left + right)
elif startAdjacentGroupIndex <= endAdjacentGroupIndex:
activeSections = max(
activeSections,
ones + st.query(startAdjacentGroupIndex, endAdjacentGroupIndex))
if (s[l] == '0' and
zeroGroupIndex[l] + 1 <= (zeroGroupIndex[r]
if s[r] == '1' else zeroGroupIndex[r] - 1)):
activeSections = max(activeSections, ones + left +
zeroGroups[zeroGroupIndex[l] + 1].length)
if (s[r] == '0' and zeroGroupIndex[l] < zeroGroupIndex[r] - 1):
activeSections = max(activeSections, ones + right +
zeroGroups[zeroGroupIndex[r] - 1].length)
return activeSections
return [getMaxActiveSections(l, r) for l, r in queries]
def _getZeroGroups(self, s: str) -> tuple[list[Group], list[int]]:
"""
Returns the zero groups and the index of the zero group that contains the
i-th character.
"""
zeroGroups = []
zeroGroupIndex = []
for i in range(len(s)):
if s[i] == '0':
if i > 0 and s[i - 1] == '0':
zeroGroups[-1].length += 1
else:
zeroGroups.append(Group(i, 1))
zeroGroupIndex.append(len(zeroGroups) - 1)
return zeroGroups, zeroGroupIndex
def _getZeroMergeLengths(self, zeroGroups: list[Group]) -> list[int]:
"""Returns the sums of the lengths of the adjacent groups."""
return [a.length + b.length for a, b in itertools.pairwise(zeroGroups)]
def _mapToAdjacentGroupIndices(
self,
startGroupIndex: int,
endGroupIndex: int
) -> tuple[int, int]:
"""
Returns the indices of the adjacent groups that contain l and r completely.
e.g. groupIndices = [0, 1, 2, 3]
adjacentGroupIndices = [0 (0, 1), 1 (1, 2), 2 (2, 3)]
map(startGroupIndex = 1, endGroupIndex = 3) -> (1, 2)
"""
return startGroupIndex, endGroupIndex - 1
| Solution |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 418,
"end": 684
} | class ____():
""" Regularization for Ridge Regression """
def __init__(self, alpha):
self.alpha = alpha
def __call__(self, w):
return self.alpha * 0.5 * w.T.dot(w)
def grad(self, w):
return self.alpha * w
| l2_regularization |
python | openai__openai-python | src/openai/resources/models.py | {
"start": 9999,
"end": 10440
} | class ____:
def __init__(self, models: AsyncModels) -> None:
self._models = models
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
models.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
models.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
models.delete,
)
| AsyncModelsWithRawResponse |
python | ansible__ansible | lib/ansible/module_utils/facts/network/freebsd.py | {
"start": 1029,
"end": 1137
} | class ____(NetworkCollector):
_fact_class = FreeBSDNetwork
_platform = 'FreeBSD'
| FreeBSDNetworkCollector |
python | OmkarPathak__pygorithm | pygorithm/data_structures/graph.py | {
"start": 7566,
"end": 8836
} | class ____(Graph):
def topological_sort(self):
"""
function for sorting graph elements using topological sort
"""
# Marking all vertices as not visited
visited = [False] * self.count
# Stack for storing the vertex
stack = []
for vertex in range(self.count):
# Call the recursive function only if not visited
if not visited[vertex]:
self.__topological_sort_rec(vertex, visited, stack)
return stack
def __topological_sort_rec(self, vertex, visited, stack):
"""
Recursive function for topological Sort
"""
# Mark the current node in visited
visited[vertex] = True
# mark all adjacent nodes of the current node
try:
for adjacent_node in self.graph[vertex]:
if not visited[adjacent_node]:
self.__topological_sort_rec(adjacent_node, visited, stack)
except KeyError:
return
# Push current vertex to stack which stores the result
stack.insert(0, vertex)
def get_code(self):
"""
returns the code for the current class
"""
return inspect.getsource(TopologicalSort)
| TopologicalSort |
python | joblib__joblib | joblib/externals/loky/backend/process.py | {
"start": 324,
"end": 1139
} | class ____(BaseProcess):
_start_method = "loky"
def __init__(
self,
group=None,
target=None,
name=None,
args=(),
kwargs={},
daemon=None,
init_main_module=False,
env=None,
):
super().__init__(
group=group,
target=target,
name=name,
args=args,
kwargs=kwargs,
daemon=daemon,
)
self.env = {} if env is None else env
self.authkey = self.authkey
self.init_main_module = init_main_module
@staticmethod
def _Popen(process_obj):
if sys.platform == "win32":
from .popen_loky_win32 import Popen
else:
from .popen_loky_posix import Popen
return Popen(process_obj)
| LokyProcess |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 6409,
"end": 13288
} | class ____(nn.Module):
def __init__(self, config: Gemma3nAudioConfig):
super().__init__()
self.config = config
self.num_heads = self.config.conf_num_attention_heads
self.channels = self.config.hidden_size
self.head_dim = self.channels // self.num_heads
self.max_backward = max(0, self.config.conf_attention_context_left - 1)
self.max_forward = self.config.conf_attention_context_right
self.pos_proj = nn.Linear(self.channels, self.num_heads * self.head_dim, bias=False)
min_timescale = 1.0
max_timescale = 1.0e4
num_timescales = self.channels // 2
log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(num_timescales - 1, 1)
inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales) * -log_timescale_increment)
self.register_buffer(
"inv_timescales",
inv_timescales.float().unsqueeze(0).unsqueeze(0),
persistent=False,
)
def _get_timing_signal_1d_pos(self, position: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
position = position.float().unsqueeze(-1)
scaled_time = position * self.inv_timescales.to(device=position.device, dtype=torch.float32)
timing_signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=-1)
return timing_signal.type(dtype)
def _relative_shift(
self,
term_bd_before_shift: torch.Tensor,
batch_size: int,
num_heads: int,
num_query_blocks: int,
query_block_size: int,
key_context_size: int,
max_span_plus_1: int,
) -> torch.Tensor:
"""Performs the relative shift.
Args:
term_bd_before_shift: Tensor of shape [B, N, U, W, F_span]. batch_size
(B), num_heads (N), num_query_blocks (U), query_block_size (W),
key_context_size (C = W+L+R), max_span_plus_1 (F_span = L+R+1).
Returns:
Tensor of shape [B, N, U, W, C].
"""
# term_bd_before_shift shape: [B, N, U, W, F_span]
# Target shape after shift: [B, N, U, W, C]
# Padding amount for the last dimension (F_span) to become (C + 1)
# C = key_context_size
# F_span = max_span_plus_1
pad_amount_last_dim = (key_context_size + 1) - max_span_plus_1
# PyTorch F.pad expects (pad_left, pad_right, pad_top, pad_bottom ...)
# We only pad the last dimension on the right.
padding_tuple = (0, pad_amount_last_dim)
term_bd_padded = nn.functional.pad(term_bd_before_shift, padding_tuple)
# Shape after pad: [B, N, U, W, C+1]
# Reshape for slicing (emulating JAX's behavior)
# [B, N, U, W * (C+1)]
term_bd_reshaped = term_bd_padded.reshape(
(
batch_size,
num_heads,
num_query_blocks,
query_block_size * (key_context_size + 1),
)
)
# Slice to effective [B, N, U, W * C]
term_bd_sliced = term_bd_reshaped[:, :, :, : query_block_size * key_context_size]
# Reshape back to [B, N, U, W, C]
term_bd_shifted = term_bd_sliced.reshape(
(
batch_size,
num_heads,
num_query_blocks,
query_block_size,
key_context_size,
)
)
return term_bd_shifted
def forward(self, queries: torch.Tensor, keys: torch.Tensor) -> torch.Tensor:
# queries: [B, U, W, N, H] (batch, num_query_blocks, query_block_size, num_heads, head_dim)
# keys: [B, U, C, N, H] (batch, num_query_blocks, key_context_size, num_heads, head_dim)
# C = W + L + R (key_context_size)
# F_span = L + R + 1 (max_span + 1)
batch_size, num_query_blocks, query_block_size, num_heads, head_dim = queries.shape
_, _, key_context_size, _, _ = keys.shape
# Relative positions for sinusoidal embeddings: [L, L-1, ..., -R]
# Length is L+R+1 = self.max_span + 1
pos_indices = torch.arange(self.max_backward, -self.max_forward - 1, -1, device=queries.device).unsqueeze(
0
) # Shape [1, F_span]
max_span_plus_1 = pos_indices.shape[1] # F_span
sin_emb_timing_signal = self._get_timing_signal_1d_pos(
pos_indices, dtype=queries.dtype
) # Shape [1, F_span, self.channels]
# Project sinusoidal embeddings: [1, F_span, self.channels] -> [1, F_span, N*H]
projected_sin_emb = self.pos_proj(sin_emb_timing_signal)
# Reshape to [1, F_span, N, H] then squeeze to [F_span, N, H]
sin_emb = projected_sin_emb.reshape(1, max_span_plus_1, self.num_heads, self.head_dim).squeeze(
0
) # Shape [F, N, H]
# term_ac: Query-Key content interaction
# queries: [B, U, W, N, H] -> permute to [B, N, U, W, H] for matmul
# keys: [B, U, C, N, H] -> permute to [B, N, U, H, C] for matmul
queries_p = queries.permute(0, 3, 1, 2, 4) # [B, N, U, W, H]
keys_p_t = keys.permute(0, 3, 1, 4, 2) # [B, N, U, H, C]
term_ac = torch.matmul(queries_p, keys_p_t) # [B, N, U, W, C]
# term_bd: Query-Position interaction
# Original einsum: term_bd_unshifed = torch.einsum('buwnh,fnh->bnuwf', queries, sin_emb)
# queries shape: [B, U, W, N, H]
# sin_emb shape: [F, N, H]
# Target output shape: [B, N, U, W, F]
# Permute queries to [B, N, U, W, H] for easier broadcasting with sin_emb
q_permuted = queries.permute(0, 3, 1, 2, 4)
# Permute sin_emb to [N, H, F] to prepare for matmul
# sin_emb original is [F, N, H]
s_permuted = sin_emb.permute(1, 2, 0) # Shape: [N, H, F]
# Reshape queries for matmul: [B, N, U*W, H]
q_reshaped = q_permuted.reshape(batch_size, num_heads, num_query_blocks * query_block_size, head_dim)
# Perform matmul: [B, N, U*W, H] @ [N, H, F]
# s_permuted ([N, H, F]) will be broadcast to [B, N, H, F]
# Result: [B, N, U*W, F]
term_bd_unshifed_matmul = torch.matmul(q_reshaped, s_permuted)
# Reshape to target [B, N, U, W, F]
term_bd_unshifed = term_bd_unshifed_matmul.reshape(
batch_size,
num_heads,
num_query_blocks,
query_block_size,
max_span_plus_1,
)
# Apply relative shift to term_bd_unshifed
term_bd_shifted = self._relative_shift(
term_bd_unshifed,
batch_size,
num_heads,
num_query_blocks,
query_block_size,
key_context_size,
max_span_plus_1,
) # Shape [B, N, U, W, C]
return term_ac + term_bd_shifted
| Gemma3nAudioRelativePositionEmbedding |
python | pypa__pip | src/pip/_vendor/dependency_groups/_implementation.py | {
"start": 1706,
"end": 8041
} | class ____:
"""
A resolver for Dependency Group data.
This class handles caching, name normalization, cycle detection, and other
parsing requirements. There are only two public methods for exploring the data:
``lookup()`` and ``resolve()``.
:param dependency_groups: A mapping, as provided via pyproject
``[dependency-groups]``.
"""
def __init__(
self,
dependency_groups: Mapping[str, str | Mapping[str, str]],
) -> None:
if not isinstance(dependency_groups, Mapping):
raise TypeError("Dependency Groups table is not a mapping")
self.dependency_groups = _normalize_group_names(dependency_groups)
# a map of group names to parsed data
self._parsed_groups: dict[
str, tuple[Requirement | DependencyGroupInclude, ...]
] = {}
# a map of group names to their ancestors, used for cycle detection
self._include_graph_ancestors: dict[str, tuple[str, ...]] = {}
# a cache of completed resolutions to Requirement lists
self._resolve_cache: dict[str, tuple[Requirement, ...]] = {}
def lookup(self, group: str) -> tuple[Requirement | DependencyGroupInclude, ...]:
"""
Lookup a group name, returning the parsed dependency data for that group.
This will not resolve includes.
:param group: the name of the group to lookup
:raises ValueError: if the data does not appear to be valid dependency group
data
:raises TypeError: if the data is not a string
:raises LookupError: if group name is absent
:raises packaging.requirements.InvalidRequirement: if a specifier is not valid
"""
if not isinstance(group, str):
raise TypeError("Dependency group name is not a str")
group = _normalize_name(group)
return self._parse_group(group)
def resolve(self, group: str) -> tuple[Requirement, ...]:
"""
Resolve a dependency group to a list of requirements.
:param group: the name of the group to resolve
:raises TypeError: if the inputs appear to be the wrong types
:raises ValueError: if the data does not appear to be valid dependency group
data
:raises LookupError: if group name is absent
:raises packaging.requirements.InvalidRequirement: if a specifier is not valid
"""
if not isinstance(group, str):
raise TypeError("Dependency group name is not a str")
group = _normalize_name(group)
return self._resolve(group, group)
def _parse_group(
self, group: str
) -> tuple[Requirement | DependencyGroupInclude, ...]:
# short circuit -- never do the work twice
if group in self._parsed_groups:
return self._parsed_groups[group]
if group not in self.dependency_groups:
raise LookupError(f"Dependency group '{group}' not found")
raw_group = self.dependency_groups[group]
if not isinstance(raw_group, list):
raise TypeError(f"Dependency group '{group}' is not a list")
elements: list[Requirement | DependencyGroupInclude] = []
for item in raw_group:
if isinstance(item, str):
# packaging.requirements.Requirement parsing ensures that this is a
# valid PEP 508 Dependency Specifier
# raises InvalidRequirement on failure
elements.append(Requirement(item))
elif isinstance(item, dict):
if tuple(item.keys()) != ("include-group",):
raise ValueError(f"Invalid dependency group item: {item}")
include_group = next(iter(item.values()))
elements.append(DependencyGroupInclude(include_group=include_group))
else:
raise ValueError(f"Invalid dependency group item: {item}")
self._parsed_groups[group] = tuple(elements)
return self._parsed_groups[group]
def _resolve(self, group: str, requested_group: str) -> tuple[Requirement, ...]:
"""
This is a helper for cached resolution to strings.
:param group: The name of the group to resolve.
:param requested_group: The group which was used in the original, user-facing
request.
"""
if group in self._resolve_cache:
return self._resolve_cache[group]
parsed = self._parse_group(group)
resolved_group = []
for item in parsed:
if isinstance(item, Requirement):
resolved_group.append(item)
elif isinstance(item, DependencyGroupInclude):
include_group = _normalize_name(item.include_group)
if include_group in self._include_graph_ancestors.get(group, ()):
raise CyclicDependencyError(
requested_group, group, item.include_group
)
self._include_graph_ancestors[include_group] = (
*self._include_graph_ancestors.get(group, ()),
group,
)
resolved_group.extend(self._resolve(include_group, requested_group))
else: # unreachable
raise NotImplementedError(
f"Invalid dependency group item after parse: {item}"
)
self._resolve_cache[group] = tuple(resolved_group)
return self._resolve_cache[group]
def resolve(
dependency_groups: Mapping[str, str | Mapping[str, str]], /, *groups: str
) -> tuple[str, ...]:
"""
Resolve a dependency group to a tuple of requirements, as strings.
:param dependency_groups: the parsed contents of the ``[dependency-groups]`` table
from ``pyproject.toml``
:param groups: the name of the group(s) to resolve
:raises TypeError: if the inputs appear to be the wrong types
:raises ValueError: if the data does not appear to be valid dependency group data
:raises LookupError: if group name is absent
:raises packaging.requirements.InvalidRequirement: if a specifier is not valid
"""
resolver = DependencyGroupResolver(dependency_groups)
return tuple(str(r) for group in groups for r in resolver.resolve(group))
| DependencyGroupResolver |
python | PrefectHQ__prefect | src/integrations/prefect-azure/tests/conftest.py | {
"start": 6002,
"end": 6600
} | class ____(MagicMock):
def from_connection_string(connection_string):
return CosmosClientMock()
def get_client(self):
return CosmosClientMock(client="client")
def get_database_client(self, database):
return CosmosClientMock(database=database)
def get_container_client(self, container):
return CosmosClientMock(container=container)
@pytest.fixture
def cosmos_connection_string(monkeypatch):
monkeypatch.setattr("prefect_azure.credentials.CosmosClient", CosmosClientMock)
return "AccountEndpoint=url/;AccountKey=AccountKey==;"
| CosmosClientMock |
python | walkccc__LeetCode | solutions/139. Word Break/139.py | {
"start": 0,
"end": 366
} | class ____:
def wordBreak(self, s: str, wordDict: list[str]) -> bool:
wordSet = set(wordDict)
@functools.lru_cache(None)
def wordBreak(s: str) -> bool:
"""Returns True if s can be segmented."""
if s in wordSet:
return True
return any(s[:i] in wordSet and wordBreak(s[i:]) for i in range(len(s)))
return wordBreak(s)
| Solution |
python | FactoryBoy__factory_boy | factory/builder.py | {
"start": 6584,
"end": 7841
} | class ____:
def __init__(self, builder, sequence, parent_step=None):
self.builder = builder
self.sequence = sequence
self.attributes = {}
self.parent_step = parent_step
self.stub = None
def resolve(self, declarations):
self.stub = Resolver(
declarations=declarations,
step=self,
sequence=self.sequence,
)
for field_name in declarations:
self.attributes[field_name] = getattr(self.stub, field_name)
@property
def chain(self):
if self.parent_step:
parent_chain = self.parent_step.chain
else:
parent_chain = ()
return (self.stub,) + parent_chain
def recurse(self, factory, declarations, force_sequence=None):
from . import base
if not issubclass(factory, base.BaseFactory):
raise errors.AssociatedClassError(
"%r: Attempting to recursing into a non-factory object %r"
% (self, factory))
builder = self.builder.recurse(factory._meta, declarations)
return builder.build(parent_step=self, force_sequence=force_sequence)
def __repr__(self):
return f"<BuildStep for {self.builder!r}>"
| BuildStep |
python | docker__docker-py | scripts/versions.py | {
"start": 232,
"end": 2186
} | class ____(namedtuple('_Version', 'major minor patch stage edition')):
@classmethod
def parse(cls, version):
edition = None
version = version.lstrip('v')
version, _, stage = version.partition('-')
if stage:
if not any(marker in stage for marker in STAGES):
edition = stage
stage = None
elif '-' in stage:
edition, stage = stage.split('-', 1)
major, minor, patch = version.split('.', 2)
return cls(major, minor, patch, stage, edition)
@property
def major_minor(self):
return self.major, self.minor
@property
def order(self):
"""Return a representation that allows this object to be sorted
correctly with the default comparator.
"""
# non-GA releases should appear before GA releases
# Order: tp -> beta -> rc -> GA
if self.stage:
for st in STAGES:
if st in self.stage:
stage = (STAGES.index(st), self.stage)
break
else:
stage = (len(STAGES),)
return (int(self.major), int(self.minor), int(self.patch)) + stage
def __str__(self):
stage = f'-{self.stage}' if self.stage else ''
edition = f'-{self.edition}' if self.edition else ''
return '.'.join(map(str, self[:3])) + edition + stage
def main():
results = set()
for url in [base_url.format(cat) for cat in categories]:
res = requests.get(url)
content = res.text
versions = [Version.parse(v) for v in re.findall(
r'"docker-([0-9]+\.[0-9]+\.[0-9]+-?.*)\.tgz"', content
)]
sorted_versions = sorted(
versions, reverse=True, key=operator.attrgetter('order')
)
latest = sorted_versions[0]
results.add(str(latest))
print(' '.join(results))
if __name__ == '__main__':
main()
| Version |
python | django__django | tests/proxy_models/tests.py | {
"start": 863,
"end": 14438
} | class ____(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = (
MyPerson.other.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
)
person_sql = (
Person.objects.order_by("name")
.query.get_compiler(DEFAULT_DB_ALIAS)
.as_sql()
)
self.assertEqual(my_person_sql, person_sql)
def test_inheritance_new_table(self):
"""
The StatusPerson models should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = (
StatusPerson.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
)
p_sql = Person.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ["Bazza del Frob", "Foo McBar", "homer"])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max("id"))["max_id"]
with self.assertRaises(Person.DoesNotExist):
MyPersonProxy.objects.get(name="Zathras")
with self.assertRaises(Person.MultipleObjectsReturned):
MyPersonProxy.objects.get(id__lt=max_id + 1)
with self.assertRaises(Person.DoesNotExist):
StatusPerson.objects.get(name="Zathras")
StatusPerson.objects.create(name="Bazza Jr.")
StatusPerson.objects.create(name="Foo Jr.")
max_id = Person.objects.aggregate(max_id=models.Max("id"))["max_id"]
with self.assertRaises(Person.MultipleObjectsReturned):
StatusPerson.objects.get(id__lt=max_id + 1)
def test_abstract_base_with_model_fields(self):
msg = (
"Abstract base class containing model fields not permitted for proxy model "
"'NoAbstract'."
)
with self.assertRaisesMessage(TypeError, msg):
class NoAbstract(Abstract):
class Meta:
proxy = True
def test_too_many_concrete_classes(self):
msg = (
"Proxy model 'TooManyBases' has more than one non-abstract model base "
"class."
)
with self.assertRaisesMessage(TypeError, msg):
class TooManyBases(User, Person):
class Meta:
proxy = True
def test_no_base_classes(self):
msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class."
with self.assertRaisesMessage(TypeError, msg):
class NoBaseClasses(models.Model):
class Meta:
proxy = True
@isolate_apps("proxy_models")
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
id="models.E017",
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL="proxy_models.AlternateModel")
@isolate_apps("proxy_models")
def test_swappable(self):
class SwappableModel(models.Model):
class Meta:
swappable = "TEST_SWAPPABLE_MODEL"
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ["barney", "fred"])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ["barney", "fred"])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ["barney", "wilma"])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ["barney", "fred"])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ["barney", "wilma"])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
Permission.objects.get(name="May display users information")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append("%s %s save" % (model, event))
return _handler
h1 = make_handler("MyPerson", "pre")
h2 = make_handler("MyPerson", "post")
h3 = make_handler("Person", "pre")
h4 = make_handler("Person", "post")
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, ["MyPerson pre save", "MyPerson post save"])
output = []
h5 = make_handler("MyPersonProxy", "pre")
h6 = make_handler("MyPersonProxy", "post")
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, ["MyPersonProxy pre save", "MyPersonProxy post save"])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_proxy_models(self):
User.objects.create(name="Bruce")
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ["Bruce"])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ["Bruce"])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ["Bruce"])
self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ["Bruce"])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name="Bruce")
u2 = UserProxy.objects.create(name="George")
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ["Bruce", "George"])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ["Bruce"])
def test_proxy_update(self):
user = User.objects.create(name="Bruce")
with self.assertNumQueries(1):
UserProxy.objects.filter(id=user.id).update(name="George")
user.refresh_from_db()
self.assertEqual(user.name, "George")
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name="Australia")
State.objects.create(name="New South Wales", country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ["New South Wales"])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ["New South Wales"])
self.assertEqual(
StateProxy.objects.get(name="New South Wales").name, "New South Wales"
)
resp = StateProxy.objects.select_related().get(name="New South Wales")
self.assertEqual(resp.name, "New South Wales")
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(name="Contributor", status="contrib")
ptu = ProxyTrackerUser.objects.get()
issue = Issue.objects.create(assignee=tu)
self.assertEqual(tu.issues.get(), issue)
self.assertEqual(ptu.issues.get(), issue)
self.assertSequenceEqual(TrackerUser.objects.filter(issues=issue), [tu])
self.assertSequenceEqual(ProxyTrackerUser.objects.filter(issues=issue), [ptu])
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(
name="Contributor", status="contrib"
)
someone = BaseUser.objects.create(name="Someone")
Bug.objects.create(
summary="fix this",
version="1.1beta",
assignee=contributor,
reporter=someone,
)
pcontributor = ProxyTrackerUser.objects.create(
name="OtherContributor", status="proxy"
)
Improvement.objects.create(
summary="improve that",
version="1.1beta",
assignee=contributor,
reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0],
)
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains="beta")
self.assertEqual(repr(resp), "<ProxyBug: ProxyBug:fix this>")
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains="beta")
self.assertEqual(repr(resp), "<ProxyBug: ProxyBug:fix this>")
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(version__icontains="beta")
self.assertEqual(repr(resp), "<ProxyProxyBug: ProxyProxyBug:fix this>")
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains="butor"
)
self.assertEqual(
repr(resp), "<ProxyImprovement: ProxyImprovement:improve that>"
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains="fix"
)
self.assertEqual(
repr(resp), "<ProxyImprovement: ProxyImprovement:improve that>"
)
def test_proxy_load_from_fixture(self):
management.call_command("loaddata", "mypeople.json", verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, "Elvis Presley")
def test_select_related_only(self):
user = ProxyTrackerUser.objects.create(name="Joe Doe", status="test")
issue = Issue.objects.create(summary="New issue", assignee=user)
qs = Issue.objects.select_related("assignee").only("assignee__status")
self.assertEqual(qs.get(), issue)
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(ROOT_URLCONF="proxy_models.urls")
| ProxyModelTests |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/lexers/pygments.py | {
"start": 4070,
"end": 4517
} | class ____(Dict[Tuple[str, ...], str]):
"""
Cache that converts Pygments tokens into `prompt_toolkit` style objects.
``Token.A.B.C`` will be converted into:
``class:pygments,pygments.A,pygments.A.B,pygments.A.B.C``
"""
def __missing__(self, key: tuple[str, ...]) -> str:
result = "class:" + pygments_token_to_classname(key)
self[key] = result
return result
_token_cache = _TokenCache()
| _TokenCache |
python | scikit-learn__scikit-learn | sklearn/ensemble/_gb.py | {
"start": 13779,
"end": 43921
} | class ____(BaseEnsemble, metaclass=ABCMeta):
"""Abstract base class for Gradient Boosting."""
_parameter_constraints: dict = {
**DecisionTreeRegressor._parameter_constraints,
"learning_rate": [Interval(Real, 0.0, None, closed="left")],
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"criterion": [StrOptions({"friedman_mse", "squared_error"})],
"subsample": [Interval(Real, 0.0, 1.0, closed="right")],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")],
"n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None],
"tol": [Interval(Real, 0.0, None, closed="left")],
}
_parameter_constraints.pop("splitter")
_parameter_constraints.pop("monotonic_cst")
@abstractmethod
def __init__(
self,
*,
loss,
learning_rate,
n_estimators,
criterion,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_depth,
min_impurity_decrease,
init,
subsample,
max_features,
ccp_alpha,
random_state,
alpha=0.9,
verbose=0,
max_leaf_nodes=None,
warm_start=False,
validation_fraction=0.1,
n_iter_no_change=None,
tol=1e-4,
):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.ccp_alpha = ccp_alpha
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
@abstractmethod
def _encode_y(self, y=None, sample_weight=None):
"""Called by fit to validate and encode y."""
@abstractmethod
def _get_loss(self, sample_weight):
"""Get loss object from sklearn._loss.loss."""
def _fit_stage(
self,
i,
X,
y,
raw_predictions,
sample_weight,
sample_mask,
random_state,
X_csc=None,
X_csr=None,
):
"""Fit another stage of ``n_trees_per_iteration_`` trees."""
original_y = y
if isinstance(self._loss, HuberLoss):
set_huber_delta(
loss=self._loss,
y_true=y,
raw_prediction=raw_predictions,
sample_weight=sample_weight,
)
# TODO: Without oob, i.e. with self.subsample = 1.0, we could call
# self._loss.loss_gradient and use it to set train_score_.
# But note that train_score_[i] is the score AFTER fitting the i-th tree.
# Note: We need the negative gradient!
neg_gradient = -self._loss.gradient(
y_true=y,
raw_prediction=raw_predictions,
sample_weight=None, # We pass sample_weights to the tree directly.
)
# 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
# on neg_gradient to simplify the loop over n_trees_per_iteration_.
if neg_gradient.ndim == 1:
neg_g_view = neg_gradient.reshape((-1, 1))
else:
neg_g_view = neg_gradient
for k in range(self.n_trees_per_iteration_):
if self._loss.is_multiclass:
y = np.array(original_y == k, dtype=np.float64)
# induce regression tree on the negative gradient
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter="best",
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
ccp_alpha=self.ccp_alpha,
)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
X = X_csc if X_csc is not None else X
tree.fit(
X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False
)
# update tree leaves
X_for_tree_update = X_csr if X_csr is not None else X
_update_terminal_regions(
self._loss,
tree.tree_,
X_for_tree_update,
y,
neg_g_view[:, k],
raw_predictions,
sample_weight,
sample_mask,
learning_rate=self.learning_rate,
k=k,
)
# add tree to ensemble
self.estimators_[i, k] = tree
return raw_predictions
def _set_max_features(self):
"""Set self.max_features_."""
if isinstance(self.max_features, str):
if self.max_features == "auto":
if is_classifier(self):
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = self.n_features_in_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else: # self.max_features == "log2"
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, Integral):
max_features = self.max_features
else: # float
max_features = max(1, int(self.max_features * self.n_features_in_))
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures."""
self.init_ = self.init
if self.init_ is None:
if is_classifier(self):
self.init_ = DummyClassifier(strategy="prior")
elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
elif isinstance(self._loss, PinballLoss):
self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
else:
self.init_ = DummyRegressor(strategy="mean")
self.estimators_ = np.empty(
(self.n_estimators, self.n_trees_per_iteration_), dtype=object
)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_score_ = np.nan
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
if hasattr(self, "estimators_"):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, "train_score_"):
del self.train_score_
if hasattr(self, "oob_improvement_"):
del self.oob_improvement_
if hasattr(self, "oob_scores_"):
del self.oob_scores_
if hasattr(self, "oob_score_"):
del self.oob_score_
if hasattr(self, "init_"):
del self.init_
if hasattr(self, "_rng"):
del self._rng
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes."""
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError(
"resize with smaller n_estimators %d < %d"
% (total_n_estimators, self.estimators_[0])
)
self.estimators_ = np.resize(
self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
)
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, "oob_improvement_"):
# if do oob resize arrays or create new if not available
if hasattr(self, "oob_improvement_"):
self.oob_improvement_ = np.resize(
self.oob_improvement_, total_n_estimators
)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros(
(total_n_estimators,), dtype=np.float64
)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
def _is_fitted(self):
return len(getattr(self, "estimators_", [])) > 0
def _check_initialized(self):
    """Check that the estimator is initialized, raising an error if not."""
    # Delegates to scikit-learn's standard fitted-state check, which
    # raises NotFittedError when fit() has not been called yet.
    check_is_fitted(self)
@_fit_context(
    # GradientBoosting*.init is not validated yet
    prefer_skip_nested_validation=False
)
def fit(self, X, y, sample_weight=None, monitor=None):
    """Fit the gradient boosting model.

    Validates the input, optionally splits off a validation set for early
    stopping, fits (or reuses, under ``warm_start``) the init estimator,
    and then delegates the stage-wise boosting to ``_fit_stages``.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    y : array-like of shape (n_samples,)
        Target values (strings or integers in classification, real numbers
        in regression)
        For classification, labels must correspond to classes.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. In the case of
        classification, splits are also ignored if they would result in any
        single class carrying a negative weight in either child node.

    monitor : callable, default=None
        The monitor is called after each iteration with the current
        iteration, a reference to the estimator and the local variables of
        ``_fit_stages`` as keyword arguments ``callable(i, self,
        locals())``. If the callable returns ``True`` the fitting procedure
        is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspect, and
        snapshotting.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    # A fresh (non warm-start) fit discards any previously fitted state.
    if not self.warm_start:
        self._clear_state()

    # Check input
    # Since check_array converts both X and y to the same dtype, but the
    # trees use different types for X and y, checking them separately.
    X, y = validate_data(
        self,
        X,
        y,
        accept_sparse=["csr", "csc", "coo"],
        dtype=DTYPE,
        multi_output=True,
    )
    # Remember whether weights were user-supplied: the init estimator is
    # only passed weights when the user actually gave some.
    sample_weight_is_none = sample_weight is None
    sample_weight = _check_sample_weight(sample_weight, X)
    if sample_weight_is_none:
        y = self._encode_y(y=y, sample_weight=None)
    else:
        y = self._encode_y(y=y, sample_weight=sample_weight)
    y = column_or_1d(y, warn=True)  # TODO: Is this still required?

    self._set_max_features()

    # self.loss is guaranteed to be a string
    self._loss = self._get_loss(sample_weight=sample_weight)

    # Early stopping requires a held-out validation set.
    if self.n_iter_no_change is not None:
        stratify = y if is_classifier(self) else None
        (
            X_train,
            X_val,
            y_train,
            y_val,
            sample_weight_train,
            sample_weight_val,
        ) = train_test_split(
            X,
            y,
            sample_weight,
            random_state=self.random_state,
            test_size=self.validation_fraction,
            stratify=stratify,
        )
        if is_classifier(self):
            if self.n_classes_ != np.unique(y_train).shape[0]:
                # We choose to error here. The problem is that the init
                # estimator would be trained on y, which has some missing
                # classes now, so its predictions would not have the
                # correct shape.
                raise ValueError(
                    "The training data after the early stopping split "
                    "is missing some classes. Try using another random "
                    "seed."
                )
    else:
        X_train, y_train, sample_weight_train = X, y, sample_weight
        X_val = y_val = sample_weight_val = None

    n_samples = X_train.shape[0]

    # First time calling fit.
    if not self._is_fitted():
        # init state
        self._init_state()

        # fit initial model and initialize raw predictions
        if self.init_ == "zero":
            raw_predictions = np.zeros(
                shape=(n_samples, self.n_trees_per_iteration_),
                dtype=np.float64,
            )
        else:
            # XXX clean this once we have a support_sample_weight tag
            if sample_weight_is_none:
                self.init_.fit(X_train, y_train)
            else:
                msg = (
                    "The initial estimator {} does not support sample "
                    "weights.".format(self.init_.__class__.__name__)
                )
                try:
                    self.init_.fit(
                        X_train, y_train, sample_weight=sample_weight_train
                    )
                except TypeError as e:
                    # Distinguish "no sample_weight parameter" from other
                    # input-validation failures of the init estimator.
                    if "unexpected keyword argument 'sample_weight'" in str(e):
                        # regular estimator without SW support
                        raise ValueError(msg) from e
                    else:  # regular estimator whose input checking failed
                        raise
                except ValueError as e:
                    if (
                        "pass parameters to specific steps of "
                        "your pipeline using the "
                        "stepname__parameter" in str(e)
                    ):  # pipeline
                        raise ValueError(msg) from e
                    else:  # regular estimator whose input checking failed
                        raise

            raw_predictions = _init_raw_predictions(
                X_train, self.init_, self._loss, is_classifier(self)
            )

        begin_at_stage = 0

        # The rng state must be preserved if warm_start is True
        self._rng = check_random_state(self.random_state)

    # warm start: this is not the first time fit was called
    else:
        # add more estimators to fitted model
        # invariant: warm_start = True
        if self.n_estimators < self.estimators_.shape[0]:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "estimators_.shape[0]=%d when "
                "warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
            )
        begin_at_stage = self.estimators_.shape[0]
        # The requirements of _raw_predict
        # are more constrained than fit. It accepts only CSR
        # matrices. Finite values have already been checked in _validate_data.
        X_train = check_array(
            X_train,
            dtype=DTYPE,
            order="C",
            accept_sparse="csr",
            ensure_all_finite=False,
        )
        # Resume from the raw predictions of the already-fitted stages.
        raw_predictions = self._raw_predict(X_train)
        self._resize_state()

    # fit the boosting stages
    n_stages = self._fit_stages(
        X_train,
        y_train,
        raw_predictions,
        sample_weight_train,
        self._rng,
        X_val,
        y_val,
        sample_weight_val,
        begin_at_stage,
        monitor,
    )

    # change shape of arrays after fit (early-stopping or additional ests)
    if n_stages != self.estimators_.shape[0]:
        self.estimators_ = self.estimators_[:n_stages]
        self.train_score_ = self.train_score_[:n_stages]
        if hasattr(self, "oob_improvement_"):
            # OOB scores were computed
            self.oob_improvement_ = self.oob_improvement_[:n_stages]
            self.oob_scores_ = self.oob_scores_[:n_stages]
            self.oob_score_ = self.oob_scores_[-1]

    self.n_estimators_ = n_stages
    return self
def _fit_stages(
    self,
    X,
    y,
    raw_predictions,
    sample_weight,
    random_state,
    X_val,
    y_val,
    sample_weight_val,
    begin_at_stage=0,
    monitor=None,
):
    """Iteratively fits the stages.

    For each stage it computes the progress (OOB, train score)
    and delegates to ``_fit_stage``.

    Returns the number of stages fit; might differ from ``n_estimators``
    due to early stopping (via ``monitor`` or ``n_iter_no_change``).
    """
    n_samples = X.shape[0]
    do_oob = self.subsample < 1.0
    sample_mask = np.ones((n_samples,), dtype=bool)
    n_inbag = max(1, int(self.subsample * n_samples))

    if self.verbose:
        verbose_reporter = VerboseReporter(verbose=self.verbose)
        verbose_reporter.init(self, begin_at_stage)

    # Pre-convert once: trees consume CSC for fitting, CSR for prediction.
    X_csc = csc_matrix(X) if issparse(X) else None
    X_csr = csr_matrix(X) if issparse(X) else None

    if self.n_iter_no_change is not None:
        # Rolling window of the last n_iter_no_change validation losses.
        loss_history = np.full(self.n_iter_no_change, np.inf)
        # We create a generator to get the predictions for X_val after
        # the addition of each successive stage
        y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)

    # Older versions of GBT had its own loss functions. With the new common
    # private loss function submodule _loss, we often are a factor of 2
    # away from the old version. Here we keep backward compatibility for
    # oob_scores_ and oob_improvement_, even if the old way is quite
    # inconsistent (sometimes the gradient is half the gradient, sometimes
    # not).
    if isinstance(
        self._loss,
        (
            HalfSquaredError,
            HalfBinomialLoss,
        ),
    ):
        factor = 2
    else:
        factor = 1

    # perform boosting iterations
    # (pre-binding i keeps the ``return i + 1`` below correct even when
    # begin_at_stage == n_estimators and the loop body never runs)
    i = begin_at_stage
    for i in range(begin_at_stage, self.n_estimators):
        # subsampling: draw a fresh in-bag mask for every stage
        if do_oob:
            sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
            y_oob_masked = y[~sample_mask]
            sample_weight_oob_masked = sample_weight[~sample_mask]
            if i == 0:  # store the initial loss to compute the OOB score
                initial_loss = factor * self._loss(
                    y_true=y_oob_masked,
                    raw_prediction=raw_predictions[~sample_mask],
                    sample_weight=sample_weight_oob_masked,
                )

        # fit next stage of trees
        raw_predictions = self._fit_stage(
            i,
            X,
            y,
            raw_predictions,
            sample_weight,
            sample_mask,
            random_state,
            X_csc=X_csc,
            X_csr=X_csr,
        )

        # track loss
        if do_oob:
            self.train_score_[i] = factor * self._loss(
                y_true=y[sample_mask],
                raw_prediction=raw_predictions[sample_mask],
                sample_weight=sample_weight[sample_mask],
            )
            self.oob_scores_[i] = factor * self._loss(
                y_true=y_oob_masked,
                raw_prediction=raw_predictions[~sample_mask],
                sample_weight=sample_weight_oob_masked,
            )
            # Improvement is measured against the previous stage's OOB loss.
            previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
            self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
            self.oob_score_ = self.oob_scores_[-1]
        else:
            # no need to fancy index w/ no subsampling
            self.train_score_[i] = factor * self._loss(
                y_true=y,
                raw_prediction=raw_predictions,
                sample_weight=sample_weight,
            )

        if self.verbose > 0:
            verbose_reporter.update(i, self)

        # User-supplied monitor may request early stopping.
        if monitor is not None:
            early_stopping = monitor(i, self, locals())
            if early_stopping:
                break

        # We also provide an early stopping based on the score from
        # validation set (X_val, y_val), if n_iter_no_change is set
        if self.n_iter_no_change is not None:
            # By calling next(y_val_pred_iter), we get the predictions
            # for X_val after the addition of the current stage
            validation_loss = factor * self._loss(
                y_val, next(y_val_pred_iter), sample_weight_val
            )

            # Require validation_score to be better (less) than at least
            # one of the last n_iter_no_change evaluations
            if np.any(validation_loss + self.tol < loss_history):
                loss_history[i % len(loss_history)] = validation_loss
            else:
                break

    return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _raw_predict_init(self, X):
    """Validate ``X`` and return the init estimator's raw predictions."""
    self._check_initialized()
    # Reuse the first fitted tree's input validation so dtype and sparse
    # handling match what the ensemble was trained with.
    X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
    if self.init_ == "zero":
        shape = (X.shape[0], self.n_trees_per_iteration_)
        return np.zeros(shape=shape, dtype=np.float64)
    return _init_raw_predictions(X, self.init_, self._loss, is_classifier(self))
def _raw_predict(self, X):
    """Return the sum of the init estimator's and all trees' raw predictions."""
    check_is_fitted(self)
    raw = self._raw_predict_init(X)
    # Each fitted stage adds its (learning-rate scaled) contribution in place.
    predict_stages(self.estimators_, X, self.learning_rate, raw)
    return raw
def _staged_raw_predict(self, X, check_input=True):
    """Yield the raw predictions of ``X`` after each boosting iteration.

    Enables monitoring (e.g. error on a test set) after every stage.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    check_input : bool, default=True
        If False, the input arrays X will not be checked.

    Returns
    -------
    raw_predictions : generator of ndarray of shape (n_samples, k)
        The raw predictions of the input samples. The order of the
        classes corresponds to that in the attribute :term:`classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k==n_classes``.
    """
    if check_input:
        X = validate_data(
            self, X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
        )
    raw = self._raw_predict_init(X)
    n_fitted_stages = self.estimators_.shape[0]
    for stage_idx in range(n_fitted_stages):
        predict_stage(self.estimators_, stage_idx, X, self.learning_rate, raw)
        # Yield a copy: the accumulator keeps being updated in place.
        yield raw.copy()
@property
def feature_importances_(self):
    """The impurity-based feature importances.

    The higher, the more important the feature.
    The importance of a feature is computed as the (normalized)
    total reduction of the criterion brought by that feature. It is also
    known as the Gini importance.

    Warning: impurity-based feature importances can be misleading for
    high cardinality features (many unique values). See
    :func:`sklearn.inspection.permutation_importance` as an alternative.

    Returns
    -------
    feature_importances_ : ndarray of shape (n_features,)
        The values of this array sum to 1, unless all trees are single node
        trees consisting of only the root node, in which case it will be an
        array of zeros.
    """
    self._check_initialized()
    # Collect unnormalized importances, skipping root-only trees which
    # carry no split information.
    per_tree_importances = []
    for stage in self.estimators_:
        for tree in stage:
            if tree.tree_.node_count > 1:
                per_tree_importances.append(
                    tree.tree_.compute_feature_importances(normalize=False)
                )
    if not per_tree_importances:
        # Degenerate case: every tree is a single root node.
        return np.zeros(shape=self.n_features_in_, dtype=np.float64)
    mean_importances = np.mean(per_tree_importances, axis=0, dtype=np.float64)
    return mean_importances / np.sum(mean_importances)
def _compute_partial_dependence_recursion(self, grid, target_features):
    """Fast partial dependence computation.

    Parameters
    ----------
    grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32
        The grid points on which the partial dependence should be
        evaluated.
    target_features : ndarray of shape (n_target_features,), dtype=np.intp
        The set of target features for which the partial dependence
        should be evaluated.

    Returns
    -------
    averaged_predictions : ndarray of shape \
            (n_trees_per_iteration_, n_samples)
        The value of the partial dependence function on each grid point.
    """
    if self.init is not None:
        # The recursion method ignores the init estimator, so anything
        # other than the implicit constant init gives wrong values.
        warnings.warn(
            "Using recursion method with a non-constant init predictor "
            "will lead to incorrect partial dependence values. "
            "Got init=%s." % self.init,
            UserWarning,
        )
    grid = np.asarray(grid, dtype=DTYPE, order="C")
    target_features = np.asarray(target_features, dtype=np.intp, order="C")
    n_stages, n_trees_per_stage = self.estimators_.shape
    averaged_predictions = np.zeros(
        (n_trees_per_stage, grid.shape[0]), dtype=np.float64, order="C"
    )
    for stage_idx in range(n_stages):
        for k in range(n_trees_per_stage):
            # Each tree accumulates into the row of its output dimension.
            tree = self.estimators_[stage_idx, k].tree_
            tree.compute_partial_dependence(
                grid, target_features, averaged_predictions[k]
            )
    # Tree values are pre-shrinkage; apply the learning rate once at the end.
    averaged_predictions *= self.learning_rate

    return averaged_predictions
def apply(self, X):
    """Apply trees in the ensemble to X, return leaf indices.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples. Internally, its dtype will be converted to
        ``dtype=np.float32``. If a sparse matrix is provided, it will
        be converted to a sparse ``csr_matrix``.

    Returns
    -------
    X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
        For each datapoint x in X and for each tree in the ensemble,
        return the index of the leaf x ends up in each estimator.
        In the case of binary classification n_classes is 1.
    """
    self._check_initialized()
    # Validation is delegated to the first tree (already fitted), so
    # per-tree apply() calls below can skip their own input checks.
    X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)

    # n_classes is 1 for regression and for binary classification.
    n_estimators, n_classes = self.estimators_.shape
    leaves = np.zeros((X.shape[0], n_estimators, n_classes))

    for stage_idx in range(n_estimators):
        for class_idx in range(n_classes):
            tree = self.estimators_[stage_idx, class_idx]
            leaves[:, stage_idx, class_idx] = tree.apply(X, check_input=False)

    return leaves
def __sklearn_tags__(self):
    # Advertise sparse-input support on top of the inherited tags.
    tags = super().__sklearn_tags__()
    tags.input_tags.sparse = True
    return tags
| BaseGradientBoosting |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 15696,
"end": 16009
} | class ____(BaseModel):
"""
Serializer for Plugin FastAPI App responses.
"""
model_config = ConfigDict(
extra="allow",
)
app: Annotated[str, Field(title="App")]
url_prefix: Annotated[str, Field(title="Url Prefix")]
name: Annotated[str, Field(title="Name")]
| FastAPIAppResponse |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 79398,
"end": 85391
} | class ____:
gensym: Callable[[AbstractValue], Var]
constid_to_tracer: WeakValueDictionary[ConstId, DynamicJaxprTracer]
constvar_to_val: dict[Var, Constants]
tracing_eqns: list[Union[ReferenceType[TracingEqn], Callable[[], TracingEqn]]]
invars: list[Var]
effects: core.Effects
debug_info: core.DebugInfo
is_high: bool
mutable_qdds: list[tuple[Var, core.MutableQuasiDynamicData]]
auto_dce: bool
def __init__(self, debug_info: core.DebugInfo, auto_dce: bool):
self.gensym = core.gensym()
self.constid_to_tracer = WeakValueDictionary()
self.constvar_to_val = {}
self.tracing_eqns = [] # cleared when we pop frame from main
self.invars = []
self.effects = set()
self.debug_info = debug_info
self.is_high = False
self.mutable_qdds = []
self.auto_dce = auto_dce
def add_eqn(self, eqn: core.TracingEqn):
assert isinstance(eqn, TracingEqn)
r = (lambda: eqn) if (eqn.effects or not self.auto_dce) else ref(eqn)
self.tracing_eqns.append(r)
def get_eqns(self):
eqns = []
for tracing_eqn in self.tracing_eqns:
e = tracing_eqn()
if e is None: continue
eqns.append(JaxprEqn(
[t.val for t in e.in_tracers],
e.outvars, e.primitive, e.params, e.effects, e.source_info, e.ctx))
return eqns
def to_jaxpr(
self, trace: DynamicJaxprTrace,
out_tracers: Sequence[Tracer],
debug_info: core.DebugInfo,
source_info: SourceInfo,
) -> tuple[Jaxpr, list[Any]]:
eqns = self.get_eqns()
outvars = [t.val for t in out_tracers]
constvars, constvals = unzip2(self.constvar_to_val.copy().items())
constvals = [c.canonical for c in constvals]
constvars, constvals = _drop_unused_vars(constvars, constvals, eqns, outvars)
effs = make_jaxpr_effects(constvars, self.invars, outvars, eqns)
# TODO(dougalm): handle qdd for consts
for v, qdd in self.mutable_qdds:
v.final_qdd = qdd.cur_val
all_vars = it.chain(constvars, self.invars, outvars)
is_high = self.is_high or any(v.aval.is_high for v in all_vars)
jaxpr = Jaxpr(constvars, self.invars, outvars, eqns, effs, debug_info, is_high)
return jaxpr, list(constvals)
def to_jaxpr2(self, out_tracers: Sequence[core.Tracer],
debug_info: core.DebugInfo):
eqns = self.get_eqns()
outvars = [t.val for t in out_tracers]
constvars, constvals = unzip2(self.constvar_to_val.copy().items())
constvals = [c.canonical for c in constvals]
constvars, constvals = _drop_unused_vars(constvars, constvals, eqns, outvars)
effs = make_jaxpr_effects(constvars, self.invars, outvars, eqns)
jaxpr = Jaxpr(constvars, self.invars, outvars, eqns, effs, debug_info)
jaxpr, out_type = _add_implicit_outputs(jaxpr)
config.enable_checks.value and core.check_jaxpr(jaxpr)
return jaxpr, out_type, constvals
def newvar(self, aval):
if isinstance(aval, DShapedArray):
# this aval may have tracers in it, so we replace those with variables
new_shape = [d.val if isinstance(d, Tracer) else d for d in aval.shape]
new_shape = [d.val if isinstance(d, Literal) else d for d in new_shape]
aval = aval.update(shape=tuple(new_shape))
if isinstance(aval, core.AvalQDD):
return self.gensym(aval.aval, initial_qdd=aval.qdd)
else:
return self.gensym(aval)
def find_progenitors(self, tracer):
eqns = self.get_eqns()
var = tracer.val
if not var or isinstance(var, Literal):
return None, None
active_vars = {var}
for eqn in eqns[::-1]:
produced = set(eqn.outvars) & active_vars
if produced:
active_vars.difference_update(produced)
active_vars.update({v for v in eqn.invars if type(v) is Var})
invar_positions = [i for i, v in enumerate(self.invars) if v in active_vars]
constvars = active_vars & set(self.constvar_to_val.copy())
const_eqns = [eqn for eqn in eqns if any(
v in constvars if type(v) is Var else type(v) is Literal
for v in eqn.invars)]
return invar_positions, const_eqns
ConstFoldRule = Callable[
[list[Union[Any, None]], Any, list[AbstractValue]],
tuple[list[Union[Any, None]], Union[JaxprEqn, None]],
]
const_fold_rules: dict[Primitive, ConstFoldRule] = {}
ForwardingRule = Callable[
[JaxprEqn],
tuple[list[Union[int, None]], Union[JaxprEqn, None]]
]
forwarding_rules: dict[Primitive, ForwardingRule] = {}
def _drop_unused_vars(constvars, constvals, eqns, outvars
) -> tuple[list[Var], list[Any]]:
# modifies eqns in-place!
def vars(atom: Atom) -> list[Var]:
if isinstance(atom, Literal):
return []
aval = atom.aval
if isinstance(aval, DShapedArray):
return [atom] + [d for d in aval.shape if isinstance(d, Var)]
return [atom]
used: set[Var] = {v for atom in outvars for v in vars(atom)}
for eqn in eqns[::-1]:
eqn.outvars = [v if v in used else DropVar(v.aval) for v in eqn.outvars]
used.update(v for atom in eqn.invars for v in vars(atom))
constvars, constvals = unzip2(
(v, val) for v, val in zip(constvars, constvals) if v in used)
return constvars, constvals
@multi_weakref_lru_cache
def _cached_abstract_eval(primitive: core.Primitive, *aval_qdds, **params):
return primitive.abstract_eval(*aval_qdds, **params)
def _verify_params_are_hashable(
primitive: core.Primitive, params: dict[str, Any]) -> None:
for k, v in params.items():
try:
hash(v)
except TypeError as e:
raise TypeError(
"As of JAX v0.7, parameters to jaxpr equations must have __hash__ and "
f"__eq__ methods. In a call to primitive {primitive}, the value of "
f"parameter {k} was not hashable: {v}") from e
# We use TracingEqn instead JaxprEqn during tracing to allow automatic
# on-the-fly DCE based on Python refcounting. DynamicJaxprTracers point to
# TracingEqns which point to DynamicJaxprTracers and unreachable constants can
# be freed.
@dataclass
| JaxprStackFrame |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 7778,
"end": 8689
} | class ____(serializers.Serializer[Never]):
query = serializers.CharField(required=False, allow_null=True)
span = serializers.CharField(required=True, allow_null=False)
min_exclusive_time = serializers.FloatField(required=False)
max_exclusive_time = serializers.FloatField(required=False)
def validate(self, data: dict[str, Any]) -> dict[str, Any]:
if (
"min_exclusive_time" in data
and "max_exclusive_time" in data
and data["min_exclusive_time"] > data["max_exclusive_time"]
):
raise serializers.ValidationError(
"min_exclusive_time cannot be greater than max_exclusive_time."
)
return data
def validate_span(self, span: str) -> Span:
try:
return Span.from_str(span)
except ValueError as e:
raise serializers.ValidationError(str(e))
| SpanSerializer |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 15972,
"end": 16479
} | class ____:
param_names = ["shape", "tail_count"]
params = [
get_benchmark_shapes("TimeTail"),
[5, 0.8],
]
def setup(self, shape, tail_count):
self.df = generate_dataframe("int", *shape, RAND_LOW, RAND_HIGH)
self.tail_count = (
int(tail_count * len(self.df.index))
if isinstance(tail_count, float)
else tail_count
)
def time_tail(self, shape, tail_count):
execute(self.df.tail(self.tail_count))
| TimeTail |
python | google__pytype | pytype/rewrite/abstract/internal.py | {
"start": 745,
"end": 1632
} | class ____(base.BaseValue):
"""Representation of a function kwarg dict."""
def __init__(
self,
ctx: base.ContextType,
constant: dict[str, _Var] | None = None,
indefinite: bool = False,
):
super().__init__(ctx)
constant = constant or {}
self._check_keys(constant)
self.constant = constant
self.indefinite = indefinite
def _check_keys(self, constant: dict[str, _Var]):
"""Runtime check to ensure the invariant."""
assert isinstance(constant, dict), constant
if not all(isinstance(k, str) for k in constant):
raise ValueError("Passing a non-string key to a function arg dict")
def __repr__(self):
indef = "+" if self.indefinite else ""
return f"FunctionArgDict({indef}{self.constant!r})"
@property
def _attrs(self):
return (immutabledict.immutabledict(self.constant), self.indefinite)
| FunctionArgDict |
python | django__django | tests/forms_tests/tests/test_media.py | {
"start": 32502,
"end": 36502
} | class ____(SimpleTestCase):
"""Media handling when media are objects instead of raw strings."""
def test_construction(self):
m = Media(
css={
"all": (
CSS("path/to/css1", media="all"),
CSS("/path/to/css2", media="all"),
)
},
js=(
Script("/path/to/js1"),
Script("http://media.other.com/path/to/js2"),
Script(
"https://secure.other.com/path/to/js3",
integrity="9d947b87fdeb25030d56d01f7aa75800",
),
),
)
self.assertHTMLEqual(
str(m),
'<link href="http://media.example.com/static/path/to/css1" media="all" '
'rel="stylesheet">\n'
'<link href="/path/to/css2" media="all" rel="stylesheet">\n'
'<script src="/path/to/js1"></script>\n'
'<script src="http://media.other.com/path/to/js2"></script>\n'
'<script src="https://secure.other.com/path/to/js3" '
'integrity="9d947b87fdeb25030d56d01f7aa75800"></script>',
)
self.assertEqual(
repr(m),
"Media(css={'all': [CSS('path/to/css1'), CSS('/path/to/css2')]}, "
"js=[Script('/path/to/js1'), Script('http://media.other.com/path/to/js2'), "
"Script('https://secure.other.com/path/to/js3')])",
)
def test_simplest_class(self):
@html_safe
class SimpleJS:
"""The simplest possible asset class."""
def __str__(self):
return '<script src="https://example.org/asset.js" rel="stylesheet">'
m = Media(js=(SimpleJS(),))
self.assertEqual(
str(m),
'<script src="https://example.org/asset.js" rel="stylesheet">',
)
def test_combine_media(self):
class MyWidget1(TextInput):
class Media:
css = {"all": (CSS("path/to/css1", media="all"), "/path/to/css2")}
js = (
"/path/to/js1",
"http://media.other.com/path/to/js2",
"https://secure.other.com/path/to/js3",
Script(
"/path/to/js4", integrity="9d947b87fdeb25030d56d01f7aa75800"
),
)
class MyWidget2(TextInput):
class Media:
css = {"all": (CSS("/path/to/css2", media="all"), "/path/to/css3")}
js = (Script("/path/to/js1"), "/path/to/js4")
w1 = MyWidget1()
w2 = MyWidget2()
self.assertHTMLEqual(
str(w1.media + w2.media),
'<link href="http://media.example.com/static/path/to/css1" media="all" '
'rel="stylesheet">\n'
'<link href="/path/to/css2" media="all" rel="stylesheet">\n'
'<link href="/path/to/css3" media="all" rel="stylesheet">\n'
'<script src="/path/to/js1"></script>\n'
'<script src="http://media.other.com/path/to/js2"></script>\n'
'<script src="https://secure.other.com/path/to/js3"></script>\n'
'<script src="/path/to/js4" integrity="9d947b87fdeb25030d56d01f7aa75800">'
"</script>",
)
def test_media_deduplication(self):
# The deduplication doesn't only happen at the point of merging two or
# more media objects.
media = Media(
css={
"all": (
CSS("/path/to/css1", media="all"),
CSS("/path/to/css1", media="all"),
"/path/to/css1",
)
},
js=(Script("/path/to/js1"), Script("/path/to/js1"), "/path/to/js1"),
)
self.assertHTMLEqual(
str(media),
'<link href="/path/to/css1" media="all" rel="stylesheet">\n'
'<script src="/path/to/js1"></script>',
)
| FormsMediaObjectTestCase |
python | huggingface__transformers | tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py | {
"start": 3740,
"end": 12878
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = DeepseekVLHybridImageProcessor if is_vision_available() else None
fast_image_processing_class = DeepseekVLHybridImageProcessorFast if is_torchvision_available() else None
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.setUp with ViT->DeepseekVLHybrid
def setUp(self):
super().setUp()
self.image_processor_tester = DeepseekVLHybridImageProcessingTester(self)
@property
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.image_processor_dict with ViT->DeepseekVLHybrid
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
# Copied from tests.models.vit.test_image_processing_vit.ViTImageProcessingTester.test_image_processor_from_dict_with_kwargs
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "high_res_image_mean"))
self.assertTrue(hasattr(image_processing, "high_res_image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "high_res_size"))
def test_call_pil_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_numpy_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
def test_call_pytorch_high_res(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").high_res_pixel_values
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
[image_inputs[0]]
)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
expected_output_image_shape = self.image_processor_tester.expected_output_high_res_image_shape(
image_inputs
)
encoded_images = image_processing(image_inputs, return_tensors="pt").high_res_pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(self.image_processor_tester.batch_size, *expected_output_image_shape),
)
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(
encoding_slow.high_res_pixel_values, encoding_fast.high_res_pixel_values
)
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors=None)
encoding_fast = image_processor_fast(dummy_images, return_tensors=None)
# Overwrite as the outputs are not always all of the same shape (kept for BC)
for i in range(len(encoding_slow.pixel_values)):
self._assert_slow_fast_tensors_equivalence(
torch.from_numpy(encoding_slow.pixel_values[i]), encoding_fast.pixel_values[i]
)
for i in range(len(encoding_slow.high_res_pixel_values)):
self._assert_slow_fast_tensors_equivalence(
torch.from_numpy(encoding_slow.high_res_pixel_values[i]), encoding_fast.high_res_pixel_values[i]
)
@unittest.skip(reason="Not supported")
def test_call_numpy_4_channels(self):
pass
| DeepseekVLHybridImageProcessingTest |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 36327,
"end": 38765
} | class ____(NestedType):
"""
Struct composite type.
Parameters
----------
fields
The fields that make up the struct. Can be either a sequence of Field
objects or a mapping of column names to data types.
Examples
--------
Initialize using a dictionary:
>>> dtype = pl.Struct({"a": pl.Int8, "b": pl.List(pl.String)})
>>> dtype
Struct({'a': Int8, 'b': List(String)})
Initialize using a list of Field objects:
>>> dtype = pl.Struct([pl.Field("a", pl.Int8), pl.Field("b", pl.List(pl.String))])
>>> dtype
Struct({'a': Int8, 'b': List(String)})
When initializing a Series, Polars can infer a struct data type from the data.
>>> s = pl.Series([{"a": 1, "b": ["x", "y"]}, {"a": 2, "b": ["z"]}])
>>> s
shape: (2,)
Series: '' [struct[2]]
[
{1,["x", "y"]}
{2,["z"]}
]
>>> s.dtype
Struct({'a': Int64, 'b': List(String)})
"""
fields: list[Field]
def __init__(self, fields: Sequence[Field] | SchemaDict) -> None:
if isinstance(fields, Mapping):
self.fields = [Field(name, dtype) for name, dtype in fields.items()]
else:
self.fields = list(fields)
def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override]
# The comparison allows comparing objects to classes, and specific
# inner types to those without (eg: inner=None). if one of the
# arguments is not specific about its inner type we infer it
# as being equal. (See the List type for more info).
if isclass(other) and issubclass(other, Struct):
return True
elif isinstance(other, Struct):
return self.fields == other.fields
else:
return False
def __hash__(self) -> int:
return hash((self.__class__, tuple(self.fields)))
def __iter__(self) -> Iterator[tuple[str, PolarsDataType]]:
for fld in self.fields:
yield fld.name, fld.dtype
def __reversed__(self) -> Iterator[tuple[str, PolarsDataType]]:
for fld in reversed(self.fields):
yield fld.name, fld.dtype
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}({dict(self)})"
def to_schema(self) -> OrderedDict[str, PolarsDataType]:
"""Return Struct dtype as a schema dict."""
return OrderedDict(self)
| Struct |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/comprehension7.py | {
"start": 216,
"end": 391
} | class ____:
var1 = [1, 2]
var2 = {x for x in var1}
# This should generate an error.
var3 = {var1[0] for x in var1}
var4 = {outer_var[0] for x in outer_var}
| A |
python | readthedocs__readthedocs.org | readthedocs/organizations/migrations/0015_remove_unused_indexes.py | {
"start": 150,
"end": 1201
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("organizations", "0014_update_dj_simple_history"),
]
operations = [
migrations.AlterField(
model_name="historicalorganization",
name="extra_history_user_id",
field=models.IntegerField(blank=True, null=True, verbose_name="ID"),
),
migrations.AlterField(
model_name="historicalorganization",
name="extra_history_user_username",
field=models.CharField(max_length=150, null=True, verbose_name="username"),
),
migrations.AlterField(
model_name="historicalteam",
name="extra_history_user_id",
field=models.IntegerField(blank=True, null=True, verbose_name="ID"),
),
migrations.AlterField(
model_name="historicalteam",
name="extra_history_user_username",
field=models.CharField(max_length=150, null=True, verbose_name="username"),
),
]
| Migration |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 15219,
"end": 15330
} | class ____(SubscriptMixin, TestCase):
def _getFUT(self):
return queue.PriorityQueue
| TestPriorityQueue |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 1860,
"end": 3325
} | class ____(HasDescriptionCode, Exception):
"""Generic error class."""
def _message(self) -> str:
# rules:
#
# 1. single arg string will usually be a unicode
# object, but since __str__() must return unicode, check for
# bytestring just in case
#
# 2. for multiple self.args, this is not a case in current
# SQLAlchemy though this is happening in at least one known external
# library, call str() which does a repr().
#
text: str
if len(self.args) == 1:
arg_text = self.args[0]
if isinstance(arg_text, bytes):
text = compat.decode_backslashreplace(arg_text, "utf-8")
# This is for when the argument is not a string of any sort.
# Otherwise, converting this exception to string would fail for
# non-string arguments.
else:
text = str(arg_text)
return text
else:
# this is not a normal case within SQLAlchemy but is here for
# compatibility with Exception.args - the str() comes out as
# a repr() of the tuple
return str(self.args)
def _sql_message(self) -> str:
message = self._message()
if self.code:
message = "%s %s" % (message, self._code_str())
return message
def __str__(self) -> str:
return self._sql_message()
| SQLAlchemyError |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 8560,
"end": 8870
} | class ____:
"""store tag information for roundtripping"""
__slots__ = ('value',)
attrib = tag_attrib
def __init__(self):
# type: () -> None
self.value = None
def __repr__(self):
# type: () -> Any
return '{0.__class__.__name__}({0.value!r})'.format(self)
| Tag |
python | encode__django-rest-framework | tests/test_utils.py | {
"start": 7922,
"end": 8515
} | class ____(TestCase):
def test_it_formats_correctly(self):
formatted = lazy_format('Does {} work? {answer}: %s', 'it', answer='Yes')
assert str(formatted) == 'Does it work? Yes: %s'
assert formatted % 'it does' == 'Does it work? Yes: it does'
def test_it_formats_lazily(self):
message = mock.Mock(wraps='message')
formatted = lazy_format(message)
assert message.format.call_count == 0
str(formatted)
assert message.format.call_count == 1
str(formatted)
assert message.format.call_count == 1
| LazyFormatTests |
python | google__pytype | pytype/rewrite/tests/test_basic.py | {
"start": 2049,
"end": 2709
} | class ____(RewriteTest):
"""Operator tests."""
def test_type_subscript(self):
self.Check("""
IntList = list[int]
def f(xs: IntList) -> list[str]:
return ["hello world"]
a = f([1, 2, 3])
assert_type(a, list)
""")
def test_binop(self):
self.Check("""
x = 1
y = 2
z = x + y
""")
def test_inplace_binop(self):
self.Check("""
class A:
def __iadd__(self, other):
return self
x = A()
y = A()
x += y
assert_type(x, A)
""")
def test_inplace_fallback(self):
self.Check("""
x = 1
y = 2
x -= y
""")
| OperatorsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.