language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/template_tests/filter_tests/test_title.py | {
"start": 117,
"end": 573
} | class ____(SimpleTestCase):
@setup({"title1": "{{ a|title }}"})
def test_title1(self):
output = self.engine.render_to_string("title1", {"a": "JOE'S CRAB SHACK"})
self.assertEqual(output, "Joe's Crab Shack")
@setup({"title2": "{{ a|title }}"})
def test_title2(self):
output = self.engine.render_to_string("title2", {"a": "555 WEST 53RD STREET"})
self.assertEqual(output, "555 West 53rd Street")
| TitleTests |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 69017,
"end": 72948
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Qwen2_5OmniTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
logger.warning_once(
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
"unexpected results may be encountered."
)
self.self_attn = Qwen2_5OmniAttention(config, layer_idx)
self.mlp = Qwen2MLP(config)
self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.attention_type = config.layer_types[layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
| Qwen2_5OmniDecoderLayer |
python | weaviate__weaviate-python-client | weaviate/cluster/async_.py | {
"start": 218,
"end": 420
} | class ____(_ClusterExecutor[ConnectionAsync]):
def __init__(self, connection: ConnectionAsync):
super().__init__(connection)
self.replications = _ReplicateAsync(connection)
| _ClusterAsync |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/test_cards.py | {
"start": 1019,
"end": 1349
} | class ____(MetaflowCard):
type = "test_editable_card_2"
separator = "$&#!!@*"
ALLOW_USER_COMPONENTS = True
def __init__(self, components=[], **kwargs):
self._components = components
def render(self, task):
return self.separator.join([str(comp) for comp in self._components])
| TestEditableCard2 |
python | aimacode__aima-python | planning.py | {
"start": 21509,
"end": 23120
} | class ____(search.Problem):
"""
[Section 10.2.1]
Forward state-space search
"""
def __init__(self, planning_problem):
super().__init__(associate('&', planning_problem.initial), associate('&', planning_problem.goals))
self.planning_problem = planning_problem
self.expanded_actions = self.planning_problem.expand_actions()
def actions(self, state):
return [action for action in self.expanded_actions if all(pre in conjuncts(state) for pre in action.precond)]
def result(self, state, action):
return associate('&', action(conjuncts(state), action.args).clauses)
def goal_test(self, state):
return all(goal in conjuncts(state) for goal in self.planning_problem.goals)
def h(self, state):
"""
Computes ignore delete lists heuristic by creating a relaxed version of the original problem (we can do that
by removing the delete lists from all actions, i.e. removing all negative literals from effects) that will be
easier to solve through GraphPlan and where the length of the solution will serve as a good heuristic.
"""
relaxed_planning_problem = PlanningProblem(initial=state.state,
goals=self.goal,
actions=[action.relaxed() for action in
self.planning_problem.actions])
try:
return len(linearize(GraphPlan(relaxed_planning_problem).execute()))
except:
return np.inf
| ForwardPlan |
python | spack__spack | lib/spack/spack/cmd/style.py | {
"start": 2102,
"end": 17117
} | class ____:
def __init__(self, name: str, required: bool = False, external: bool = True) -> None:
self.name = name
self.external = external
self.required = required
def __call__(self, fun):
self.fun = fun
tools[self.name] = self
return fun
@property
def installed(self) -> bool:
return bool(which(self.name)) if self.external else True
@property
def executable(self) -> Optional[Executable]:
return which(self.name) if self.external else None
#: tools we run in spack style
tools: Dict[str, tool] = {}
def changed_files(base="develop", untracked=True, all_files=False, root=None):
"""Get list of changed files in the Spack repository.
Arguments:
base (str): name of base branch to evaluate differences with.
untracked (bool): include untracked files in the list.
all_files (bool): list all files in the repository.
root (str): use this directory instead of the Spack prefix.
"""
if root is None:
root = spack.paths.prefix
git = spack.util.git.git(required=True)
# ensure base is in the repo
base_sha = git(
"rev-parse", "--quiet", "--verify", "--revs-only", base, fail_on_error=False, output=str
)
if git.returncode != 0:
tty.die(
"This repository does not have a '%s' revision." % base,
"spack style needs this branch to determine which files changed.",
"Ensure that '%s' exists, or specify files to check explicitly." % base,
)
range = "{0}...".format(base_sha.strip())
git_args = [
# Add changed files committed since branching off of develop
["diff", "--name-only", "--diff-filter=ACMR", range],
# Add changed files that have been staged but not yet committed
["diff", "--name-only", "--diff-filter=ACMR", "--cached"],
# Add changed files that are unstaged
["diff", "--name-only", "--diff-filter=ACMR"],
]
# Add new files that are untracked
if untracked:
git_args.append(["ls-files", "--exclude-standard", "--other"])
# add everything if the user asked for it
if all_files:
git_args.append(["ls-files", "--exclude-standard"])
excludes = [os.path.realpath(os.path.join(root, f)) for f in exclude_paths]
changed = set()
for arg_list in git_args:
files = git(*arg_list, output=str).split("\n")
for f in files:
# Ignore non-Python files
if not (f.endswith(".py") or f == "bin/spack"):
continue
# Ignore files in the exclude locations
if any(os.path.realpath(f).startswith(e) for e in excludes):
continue
changed.add(f)
return sorted(changed)
def setup_parser(subparser: argparse.ArgumentParser) -> None:
subparser.add_argument(
"-b",
"--base",
action="store",
default="develop",
help="branch to compare against to determine changed files (default: develop)",
)
subparser.add_argument(
"-a", "--all", action="store_true", help="check all files, not just changed files"
)
subparser.add_argument(
"-r",
"--root-relative",
action="store_true",
default=False,
help="print root-relative paths (default: cwd-relative)",
)
subparser.add_argument(
"-U",
"--no-untracked",
dest="untracked",
action="store_false",
default=True,
help="exclude untracked files from checks",
)
subparser.add_argument(
"-f",
"--fix",
action="store_true",
default=False,
help="format automatically if possible (e.g., with isort, black)",
)
subparser.add_argument(
"--root", action="store", default=None, help="style check a different spack instance"
)
tool_group = subparser.add_mutually_exclusive_group()
tool_group.add_argument(
"-t",
"--tool",
action="append",
help="specify which tools to run (default: %s)" % ", ".join(tool_names),
)
tool_group.add_argument(
"-s",
"--skip",
metavar="TOOL",
action="append",
help="specify tools to skip (choose from %s)" % ", ".join(tool_names),
)
subparser.add_argument(
"--spec-strings",
action="store_true",
help="upgrade spec strings in Python, JSON and YAML files for compatibility with Spack "
"v1.0 and v0.x. Example: spack style ``--spec-strings $(git ls-files)``. Note: must be "
"used only on specs from spack v0.X.",
)
subparser.add_argument("files", nargs=argparse.REMAINDER, help="specific files to check")
def cwd_relative(path, root, initial_working_dir):
"""Translate prefix-relative path to current working directory-relative."""
return os.path.relpath(os.path.join(root, path), initial_working_dir)
def rewrite_and_print_output(
output, args, re_obj=re.compile(r"^(.+):([0-9]+):"), replacement=r"{0}:{1}:"
):
"""rewrite ouput with <file>:<line>: format to respect path args"""
# print results relative to current working directory
def translate(match):
return replacement.format(
cwd_relative(match.group(1), args.root, args.initial_working_dir),
*list(match.groups()[1:]),
)
for line in output.split("\n"):
if not line:
continue
if any(ignore in line for ignore in mypy_ignores):
# some mypy annotations can't be disabled in older mypys (e.g. .971, which
# is the only mypy that supports python 3.6), so we filter them here.
continue
if not args.root_relative and re_obj:
line = re_obj.sub(translate, line)
print(line)
def print_style_header(file_list, args, tools_to_run):
tty.msg("Running style checks on spack", "selected: " + ", ".join(tools_to_run))
# translate modified paths to cwd_relative if needed
paths = [filename.strip() for filename in file_list]
if not args.root_relative:
paths = [cwd_relative(filename, args.root, args.initial_working_dir) for filename in paths]
tty.msg("Modified files", *paths)
sys.stdout.flush()
def print_tool_header(tool):
sys.stdout.flush()
tty.msg("Running %s checks" % tool)
sys.stdout.flush()
def print_tool_result(tool, returncode):
if returncode == 0:
color.cprint(" @g{%s checks were clean}" % tool)
else:
color.cprint(" @r{%s found errors}" % tool)
@tool("flake8", required=True)
def run_flake8(flake8_cmd, file_list, args):
returncode = 0
output = ""
# run in chunks of 100 at a time to avoid line length limit
# filename parameter in config *does not work* for this reliably
for chunk in grouper(file_list, 100):
output = flake8_cmd(
# always run with config from running spack prefix
"--config=%s" % os.path.join(spack.paths.prefix, ".flake8"),
*chunk,
fail_on_error=False,
output=str,
)
returncode |= flake8_cmd.returncode
rewrite_and_print_output(output, args)
print_tool_result("flake8", returncode)
return returncode
@tool("mypy")
def run_mypy(mypy_cmd, file_list, args):
# always run with config from running spack prefix
common_mypy_args = [
"--config-file",
os.path.join(spack.paths.prefix, "pyproject.toml"),
"--show-error-codes",
]
mypy_arg_sets = [common_mypy_args + ["--package", "spack", "--package", "llnl"]]
if "SPACK_MYPY_CHECK_PACKAGES" in os.environ:
mypy_arg_sets.append(
common_mypy_args + ["--package", "packages", "--disable-error-code", "no-redef"]
)
returncode = 0
for mypy_args in mypy_arg_sets:
output = mypy_cmd(*mypy_args, fail_on_error=False, output=str)
returncode |= mypy_cmd.returncode
rewrite_and_print_output(output, args)
print_tool_result("mypy", returncode)
return returncode
@tool("isort")
def run_isort(isort_cmd, file_list, args):
# always run with config from running spack prefix
isort_args = ("--settings-path", os.path.join(spack.paths.prefix, "pyproject.toml"))
if not args.fix:
isort_args += ("--check", "--diff")
pat = re.compile("ERROR: (.*) Imports are incorrectly sorted")
replacement = "ERROR: {0} Imports are incorrectly sorted"
returncode = [0]
def process_files(file_list, is_args):
for chunk in grouper(file_list, 100):
packed_args = is_args + tuple(chunk)
output = isort_cmd(*packed_args, fail_on_error=False, output=str, error=str)
returncode[0] |= isort_cmd.returncode
rewrite_and_print_output(output, args, pat, replacement)
# packages
process_files(filter(is_package, file_list), isort_args)
# non-packages
process_files(filter(lambda f: not is_package(f), file_list), isort_args)
print_tool_result("isort", returncode[0])
return returncode[0]
@tool("black")
def run_black(black_cmd, file_list, args):
# always run with config from running spack prefix
black_args = ("--config", os.path.join(spack.paths.prefix, "pyproject.toml"))
if not args.fix:
black_args += ("--check", "--diff")
if color.get_color_when(): # only show color when spack would
black_args += ("--color",)
pat = re.compile("would reformat +(.*)")
replacement = "would reformat {0}"
returncode = 0
output = ""
# run in chunks of 100 at a time to avoid line length limit
# filename parameter in config *does not work* for this reliably
for chunk in grouper(file_list, 100):
packed_args = black_args + tuple(chunk)
output = black_cmd(*packed_args, fail_on_error=False, output=str, error=str)
returncode |= black_cmd.returncode
rewrite_and_print_output(output, args, pat, replacement)
print_tool_result("black", returncode)
return returncode
def _module_part(root: str, expr: str):
parts = expr.split(".")
# spack.pkg is for repositories, don't try to resolve it here.
if expr.startswith(spack.repo.PKG_MODULE_PREFIX_V1) or expr == "spack.pkg":
return None
while parts:
f1 = os.path.join(root, "lib", "spack", *parts) + ".py"
f2 = os.path.join(root, "lib", "spack", *parts, "__init__.py")
if (
os.path.exists(f1)
# ensure case sensitive match
and f"{parts[-1]}.py" in os.listdir(os.path.dirname(f1))
or os.path.exists(f2)
):
return ".".join(parts)
parts.pop()
return None
def _run_import_check(
file_list: List[str],
*,
fix: bool,
root_relative: bool,
root=spack.paths.prefix,
working_dir=spack.paths.prefix,
out=sys.stdout,
):
if sys.version_info < (3, 9):
print("import check requires Python 3.9 or later")
return 0
is_use = re.compile(r"(?<!from )(?<!import )spack\.[a-zA-Z0-9_\.]+")
# redundant imports followed by a `# comment` are ignored, cause there can be legimitate reason
# to import a module: execute module scope init code, or to deal with circular imports.
is_abs_import = re.compile(r"^import (spack\.[a-zA-Z0-9_\.]+)$", re.MULTILINE)
exit_code = 0
for file in file_list:
to_add = set()
to_remove = []
pretty_path = file if root_relative else cwd_relative(file, root, working_dir)
try:
with open(file, "r", encoding="utf-8") as f:
contents = f.read()
parsed = ast.parse(contents)
except Exception:
exit_code = 1
print(f"{pretty_path}: could not parse", file=out)
continue
for m in is_abs_import.finditer(contents):
# Find at most two occurences: the first is the import itself, the second is its usage.
if len(list(islice(re.finditer(rf"{re.escape(m.group(1))}(?!\w)", contents), 2))) == 1:
to_remove.append(m.group(0))
exit_code = 1
print(f"{pretty_path}: redundant import: {m.group(1)}", file=out)
# Clear all strings to avoid matching comments/strings etc.
for node in ast.walk(parsed):
if isinstance(node, ast.Constant) and isinstance(node.value, str):
node.value = ""
filtered_contents = ast.unparse(parsed) # novermin
for m in is_use.finditer(filtered_contents):
module = _module_part(root, m.group(0))
if not module or module in to_add:
continue
if re.search(rf"import {re.escape(module)}(?!\w|\.)", contents):
continue
to_add.add(module)
exit_code = 1
print(f"{pretty_path}: missing import: {module} ({m.group(0)})", file=out)
if not fix or not to_add and not to_remove:
continue
with open(file, "r", encoding="utf-8") as f:
lines = f.readlines()
if to_add:
# insert missing imports before the first import, delegate ordering to isort
for node in parsed.body:
if isinstance(node, (ast.Import, ast.ImportFrom)):
first_line = node.lineno
break
else:
print(f"{pretty_path}: could not fix", file=out)
continue
lines.insert(first_line, "\n".join(f"import {x}" for x in to_add) + "\n")
new_contents = "".join(lines)
# remove redundant imports
for statement in to_remove:
new_contents = new_contents.replace(f"{statement}\n", "")
with open(file, "w", encoding="utf-8") as f:
f.write(new_contents)
return exit_code
@tool("import", external=False)
def run_import_check(import_check_cmd, file_list, args):
exit_code = _run_import_check(
file_list,
fix=args.fix,
root_relative=args.root_relative,
root=args.root,
working_dir=args.initial_working_dir,
)
print_tool_result("import", exit_code)
return exit_code
def validate_toolset(arg_value):
"""Validate ``--tool`` and ``--skip`` arguments (sets of optionally comma-separated tools)."""
tools = set(",".join(arg_value).split(",")) # allow args like 'isort,flake8'
for tool in tools:
if tool not in tool_names:
tty.die("Invalid tool: '%s'" % tool, "Choose from: %s" % ", ".join(tool_names))
return tools
def missing_tools(tools_to_run: List[str]) -> List[str]:
return [t for t in tools_to_run if not tools[t].installed]
def _bootstrap_dev_dependencies():
import spack.bootstrap
with spack.bootstrap.ensure_bootstrap_configuration():
spack.bootstrap.ensure_environment_dependencies()
IS_PROBABLY_COMPILER = re.compile(r"%[a-zA-Z_][a-zA-Z0-9\-]")
| tool |
python | google__pytype | pytype/rewrite/frame_test.py | {
"start": 5477,
"end": 16079
} | class ____(FrameTestBase):
def test_run_no_crash(self):
block = [
opcodes.LOAD_CONST(0, 0, 0, 0, 0, 0, None),
opcodes.RETURN_VALUE(1, 0),
]
code = test_utils.FakeOrderedCode([block], [None])
frame = frame_lib.Frame(self.ctx, 'test', code.Seal())
frame.run()
def test_typing(self):
frame = self._make_frame('')
assert_type(frame.final_locals, Mapping[str, abstract.BaseValue])
def test_load_const(self):
block = [
opcodes.LOAD_CONST(0, 0, 0, 0, 0, 0, 42),
opcodes.RETURN_VALUE(1, 0),
]
code = test_utils.FakeOrderedCode([block], [42])
frame = frame_lib.Frame(self.ctx, 'test', code.Seal())
frame.step()
self.assertEqual(len(frame._stack), 1)
constant = frame._stack.top().get_atomic_value()
self.assertEqual(constant, self.ctx.consts[42])
def test_store_local(self):
frame = self._make_frame('x = 42')
frame.run()
self.assertIn('x', frame.final_locals)
self.assertEqual(frame.final_locals['x'], self.ctx.consts[42])
def test_store_global(self):
frame = self._make_frame("""
global x
x = 42
""")
frame.run()
self.assertIn('x', frame.final_locals)
self.assertEqual(frame.final_locals['x'], self.ctx.consts[42])
def test_function(self):
frame = self._make_frame('def f(): pass')
frame.run()
self.assertIn('f', frame.final_locals)
func = frame.final_locals['f']
self.assertIsInstance(func, abstract.InterpreterFunction)
self.assertEqual(func.name, 'f')
self.assertCountEqual(frame.functions, [func])
def test_copy_globals_from_module_frame(self):
module_frame = self._make_frame(
"""
x = 42
def f():
pass
""",
name='__main__',
)
module_frame.run()
f = _get(module_frame, 'f', _FrameFunction)
f_frame = module_frame.make_child_frame(f, {})
self.assertIn('x', f_frame._initial_globals)
self.assertIn('f', f_frame._initial_globals)
def test_copy_globals_from_nonmodule_frame(self):
f_frame = self._make_frame(
"""
global x
x = 42
def g():
pass
""",
name='f',
)
f_frame.run()
g = _get(f_frame, 'g', _FrameFunction)
g_frame = f_frame.make_child_frame(g, {})
self.assertIn('x', g_frame._initial_globals)
def test_copy_globals_from_inner_frame_to_module(self):
module_frame = self._make_frame(
"""
def f():
global x
x = 42
f()
""",
name='__main__',
)
module_frame.run()
self.assertIn('f', module_frame.final_locals)
self.assertIn('x', module_frame.final_locals)
def test_copy_globals_from_inner_frame_to_outer(self):
f_frame = self._make_frame(
"""
def g():
global x
x = 42
g()
""",
name='f',
)
f_frame.run()
self.assertIn('g', f_frame.final_locals)
self.assertIn('x', f_frame.final_locals)
self.assertCountEqual(f_frame._shadowed_nonlocals.get_global_names(), {'x'})
def test_read_enclosing(self):
module_frame = self._make_frame("""
def f():
x = None
def g():
y = x
""")
module_frame.run()
f = _get(module_frame, 'f', _FrameFunction)
f_frame = module_frame.make_child_frame(f, {})
f_frame.run()
g = _get(f_frame, 'g', _FrameFunction)
g_frame = f_frame.make_child_frame(g, {})
g_frame.run()
self.assertIn('y', g_frame.final_locals)
y = _get(g_frame, 'y', abstract.PythonConstant)
self.assertIsNone(y.constant)
self.assertIn('x', g_frame._initial_enclosing)
def test_write_enclosing(self):
module_frame = self._make_frame("""
def f():
x = None
def g():
nonlocal x
x = 5
g()
""")
module_frame.run()
f = _get(module_frame, 'f', _FrameFunction)
f_frame = module_frame.make_child_frame(f, {})
f_frame.run()
self.assertIn('x', f_frame.final_locals)
self.assertIn('g', f_frame.final_locals)
x = _get(f_frame, 'x', abstract.PythonConstant)
self.assertEqual(x.constant, 5)
def test_class(self):
module_frame = self._make_frame('class C: ...')
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
self.assertEqual(cls.name, 'C')
def test_class_body(self):
module_frame = self._make_frame("""
class C:
def f(self): ...
""")
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
self.assertIn('f', cls.members)
f = cls.members['f']
self.assertIsInstance(f, abstract.InterpreterFunction)
self.assertEqual(f.name, 'C.f')
def test_instance_attribute(self):
module_frame = self._make_frame("""
class C:
def __init__(self):
self.x = 3
""")
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
instance = cls.instantiate()
self.assertEqual(instance.get_attribute('x'), self.ctx.consts[3])
def test_read_instance_attribute(self):
module_frame = self._make_frame("""
class C:
def __init__(self):
self.x = 3
def read(self):
x = self.x
""")
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
instance = cls.instantiate()
read = cast(abstract.InterpreterFunction, cls.members['read'])
(frame,) = read.bind_to(instance).analyze()
self.assertIn('x', frame.final_locals)
self.assertEqual(frame.final_locals['x'], self.ctx.consts[3])
def test_write_and_read_instance_attribute(self):
module_frame = self._make_frame("""
class C:
def write_and_read(self):
self.x = 3
x = self.x
""")
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
instance = cls.instantiate()
write_and_read = cast(
abstract.InterpreterFunction, cls.members['write_and_read']
)
(frame,) = write_and_read.bind_to(instance).analyze()
self.assertIn('x', frame.final_locals)
self.assertEqual(frame.final_locals['x'], self.ctx.consts[3])
def test_modify_instance(self):
module_frame = self._make_frame("""
class C:
def f(self):
self.x = 3
c = C()
c.f()
""")
module_frame.run()
c = _get(module_frame, 'c', abstract.MutableInstance)
self.assertEqual(c.get_attribute('x'), self.ctx.consts[3])
def test_overwrite_instance_attribute(self):
module_frame = self._make_frame("""
class C:
def f(self):
self.x = 3
def g(self):
self.f()
self.x = None
c = C()
c.g()
""")
module_frame.run()
c = _get(module_frame, 'c', abstract.MutableInstance)
self.assertEqual(c.get_attribute('x'), self.ctx.consts[None])
def test_instance_attribute_multiple_options(self):
module_frame = self._make_frame("""
class C:
def __init__(self, rand):
if rand:
self.x = 3
else:
self.x = None
""")
module_frame.run()
instance = _get(module_frame, 'C', abstract.InterpreterClass).instantiate()
self.assertEqual(
instance.get_attribute('x'),
abstract.Union(self.ctx, (self.ctx.consts[3], self.ctx.consts[None])),
)
def test_method_parameter(self):
module_frame = self._make_frame("""
class C:
def f(self, x):
self.x = x
c = C()
c.f(0)
""")
module_frame.run()
instance = _get(module_frame, 'c', abstract.MutableInstance)
self.assertEqual(instance.get_attribute('x'), self.ctx.consts[0])
def test_multiple_initializers(self):
module_frame = self._make_frame("""
class C:
def __init__(self, rand):
if rand:
self.x = 3
def custom_init(self, rand):
if rand:
self.x = None
""")
module_frame.run()
cls = _get(module_frame, 'C', abstract.InterpreterClass)
cls.initializers.append('custom_init')
instance = cls.instantiate()
self.assertEqual(
instance.get_attribute('x'),
abstract.Union(self.ctx, (self.ctx.consts[3], self.ctx.consts[None])),
)
def test_return(self):
module_frame = self._make_frame("""
def f(rand):
if rand:
return 3
else:
return None
""")
module_frame.run()
f = _get(module_frame, 'f', _FrameFunction)
(f_frame,) = f.analyze()
self.assertEqual(
f_frame.get_return_value(),
abstract.Union(self.ctx, (self.ctx.consts[3], self.ctx.consts[None])),
)
def test_stack(self):
module_frame = self._make_frame('def f(): pass')
self.assertEqual(module_frame.stack, [module_frame])
module_frame.run()
f = _get(module_frame, 'f', _FrameFunction)
f_frame = module_frame.make_child_frame(f, {})
self.assertEqual(f_frame.stack, [module_frame, f_frame])
def test_stack_ops(self):
"""Basic smoke test for the stack manipulation ops."""
# These just pass through to the underlying DataStack, which is well tested,
# so we don't bother checking the stack contents here.
block = [
opcodes.LOAD_CONST(1, 0, 0, 0, 0, 0, 1), # 1
opcodes.LOAD_CONST(2, 0, 0, 0, 0, 1, 2), # 2
opcodes.LOAD_CONST(3, 0, 0, 0, 0, 2, 3), # 3
opcodes.DUP_TOP(4, 0), # 4
opcodes.DUP_TOP_TWO(5, 0), # 6
opcodes.ROT_TWO(6, 0), # 6
opcodes.ROT_THREE(7, 0), # 6
opcodes.ROT_FOUR(8, 0), # 6
opcodes.ROT_N(9, 0, 0, 0, 0, 2, 2), # 6
opcodes.POP_TOP(10, 0), # 5
opcodes.POP_TOP(11, 0), # 4
opcodes.POP_TOP(12, 0), # 3
opcodes.POP_TOP(13, 0), # 2
opcodes.POP_TOP(14, 0), # 1
opcodes.RETURN_VALUE(15, 0), # 0
]
code = test_utils.FakeOrderedCode([block], [1, 2, 3])
frame = frame_lib.Frame(self.ctx, 'test', code.Seal())
frame.run() # Should not crash
def test_class_bases(self):
frame = self._make_frame("""
class C:
pass
class D(C):
pass
""")
frame.run()
c = _get(frame, 'C', abstract.InterpreterClass)
d = _get(frame, 'D', abstract.InterpreterClass)
self.assertFalse(c.bases)
self.assertEqual(d.bases, [c])
def test_metaclass(self):
frame = self._make_frame("""
class Meta(type):
pass
class C(metaclass=Meta):
pass
""")
frame.run()
meta = _get(frame, 'Meta', abstract.InterpreterClass)
c = _get(frame, 'C', abstract.InterpreterClass)
self.assertEqual(c.metaclass, meta)
| FrameTest |
python | PyCQA__pylint | doc/data/messages/t/too-many-locals/good.py | {
"start": 68,
"end": 1464
} | class ____(NamedTuple):
number_of_sweets: int
number_of_sweet_per_child: int
number_of_children: int
@property
def sweets_given(self):
return self.number_of_sweet_per_child * self.number_of_children
def handle_sweets(infos):
children = [Child(info) for info in infos]
characteristics = SweetDistrubutionCharacteristics(87, 5, len(children))
_allocate_sweets_to_children(children, characteristics)
financial_impact = _assess_financial_impact(characteristics)
print(f"{children} ate {financial_impact}")
def _allocate_sweets_to_children(
children, characteristics: SweetDistrubutionCharacteristics
) -> None:
sweets = [Sweet() * characteristics.number_of_sweets]
for child in children:
child.give(sweets[characteristics.number_of_sweet_per_child :])
def _assess_financial_impact(characteristics: SweetDistrubutionCharacteristics) -> str:
time_to_eat_sweet = 54
money = 45.0
price_of_sweet = 0.42
cost_of_children = characteristics.sweets_given * price_of_sweet
remaining_money = money - cost_of_children
time_it_took_assuming_parallel_eating = (
time_to_eat_sweet * characteristics.number_of_sweet_per_child
)
return (
f"{cost_of_children}¤ of sweets in "
f"{time_it_took_assuming_parallel_eating}, you still have {remaining_money}"
)
| SweetDistrubutionCharacteristics |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 51060,
"end": 54734
} | class ____:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
    # Median expressed through the percentile interface (q in percent).
    self.check(np.nanpercentile, q=50)
| TestNaNFunctions |
python | apache__thrift | test/py/SerializationTest.py | {
"start": 15239,
"end": 17481
} | class ____(unittest.TestCase):
def testSerializeThenDeserialize(self):
    """Round-trip structs, booleans and every enum value through the
    default serialize()/deserialize() pair, and check that serializing
    the same unchanged object is deterministic."""

    def roundtrip(original, blank):
        # Serialize, rehydrate into a fresh instance, and compare.
        deserialize(blank, serialize(original))
        self.assertEqual(original, blank)

    nested = Xtruct2(i32_thing=1,
                     struct_thing=Xtruct(string_thing="foo"))
    first = serialize(nested)
    # Repeated serialization of an unchanged object must be identical.
    for _ in range(10):
        self.assertEqual(first, serialize(nested))
    roundtrip(nested, Xtruct2())

    # Plain struct with a string field.
    roundtrip(Xtruct(string_thing="bar"), Xtruct())

    # Booleans survive the round trip.
    roundtrip(Bools(im_true=True, im_false=False), Bools())

    # Enums: support both old-style generated enums and IntEnum-based ones.
    def _enumerate_enum(enum_class):
        if hasattr(enum_class, '_VALUES_TO_NAMES'):
            # old-style enums
            for num, name in enum_class._VALUES_TO_NAMES.items():
                yield (num, name)
        else:
            # assume Python 3.4+ IntEnum-based
            from enum import IntEnum
            self.assertTrue(issubclass(enum_class, IntEnum))
            for member in enum_class:
                yield (member.value, member.name)

    for num, name in _enumerate_enum(Numberz):
        bonk = Bonk(message='enum Numberz value %d is string %s' % (num, name),
                    type=num)
        roundtrip(bonk, Bonk())
def suite():
    """Build the full serialization test suite, in a fixed order."""
    loader = unittest.TestLoader()
    all_tests = unittest.TestSuite()
    # Keep this order: protocols first, then the serializer helpers.
    for case in (NormalBinaryTest,
                 AcceleratedBinaryTest,
                 AcceleratedCompactTest,
                 CompactProtocolTest,
                 JSONProtocolTest,
                 AcceleratedFramedTest,
                 SerializersTest):
        all_tests.addTest(loader.loadTestsFromTestCase(case))
    return all_tests
if __name__ == "__main__":
    # Run via the explicit suite() so the test classes execute in the order
    # listed above, with verbose per-test output.
    unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| SerializersTest |
python | pytorch__pytorch | test/test_mps.py | {
"start": 371983,
"end": 380014
} | class ____(TestCaseMPS):
def test_nll_loss_mismatched_batch(self, device='mps'):
    # Input batch of 10 but only 3 targets: nll_loss must reject the
    # mismatched batch sizes with a ValueError.
    x = torch.randn((10, 3), requires_grad=True, device=device)
    # t should have size (10,)
    t = torch.zeros((3,), dtype=torch.int64, device=device)
    with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
        F.nll_loss(x, t)
def test_nll_loss_out_of_bounds_ignore_index(self):
    # ignore_index values outside [0, C) are legal; CPU and MPS results
    # must agree for both 'mean' and 'none' reductions.
    def test_nll_loss_out_of_bounds_ignore_index_helper(device):
        output = []
        x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                          [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
        # t1 uses 255 (out of bounds), t2 uses the default -100 ignore value.
        t1 = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)
        t2 = torch.tensor([0, 1, 1, 0, -100, 2], dtype=torch.int64, device=device)
        for reduction in ['mean', 'none']:
            # out of bound ignore_index
            output.append(F.nll_loss(x, t1, ignore_index=255, reduction=reduction))
            # default ignore_index
            output.append(F.nll_loss(x, t2, reduction=reduction))
        return output
    output_cpu = test_nll_loss_out_of_bounds_ignore_index_helper(device='cpu')
    output_mps = test_nll_loss_out_of_bounds_ignore_index_helper(device='mps')
    for cpu, mps in zip(output_cpu, output_mps):
        self.assertEqual(cpu, mps)
def test_nll_loss_invalid_target_dim(self):
    # 2D class-index targets are invalid for 2D input; both devices must
    # raise the same RuntimeError.
    def _test_nll_loss_invalid_target_dim(device):
        # NOTE(review): 'output' is unused in this helper.
        output = []
        x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                          [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
        t = torch.zeros((6, 2), dtype=torch.int64, device=device)
        with self.assertRaisesRegex(RuntimeError, "1D target tensor expected"):
            F.nll_loss(x, t)
    _test_nll_loss_invalid_target_dim(device='cpu')
    _test_nll_loss_invalid_target_dim(device='mps')
def test_nll_loss_invalid_weights(self):
    # The weight tensor must be 1D with exactly C (=3) entries; wrong
    # shapes must raise on both devices.
    def _test_nll_loss_invalid_weights(device):
        x = torch.tensor([[0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1],
                          [0.3, 0.5, 0.2], [0.1, 0.7, 0.2], [0.4, 0.5, 0.1]], device=device)
        t = torch.tensor([0, 1, 2, 1, 1, 2], dtype=torch.int64, device=device)
        invalid_weights = [
            torch.zeros(4, device=device),       # wrong length
            torch.zeros((1, 3), device=device),  # wrong rank
        ]
        msg = "weight tensor should be defined either for all 3 classes or no classes"
        for weight in invalid_weights:
            with self.assertRaisesRegex(RuntimeError, msg):
                F.nll_loss(x, t, weight=weight)
    _test_nll_loss_invalid_weights(device='cpu')
    _test_nll_loss_invalid_weights(device='mps')
def _nll_loss_helper(self, input_size, reduction, expected):
    # Compare weighted nll_loss forward and backward between CPU and MPS
    # for a random input of the given size.
    # NOTE(review): 'expected' is never used -- CPU/MPS parity is checked
    # instead of the expected value; confirm that is intentional.
    # CPU
    input = torch.rand(input_size, requires_grad=True, device='cpu')
    num_channels = input_size[1]
    target_size = (input_size[0], ) + tuple(input_size[2:])
    target = torch.randint(num_channels, target_size, device='cpu')
    weights = torch.randn(num_channels)
    # MPS copies of the same data.
    input_mps = input.detach().clone().to('mps').requires_grad_()
    target_mps = target.detach().clone().to('mps')
    weights_mps = weights.to("mps")
    output_cpu = F.nll_loss(input, target, weight=weights, reduction=reduction)
    output_mps = F.nll_loss(input_mps, target_mps, weight=weights_mps, reduction=reduction)
    self.assertEqual(output_cpu, output_mps.to('cpu'))
    # Gradients must match as well.
    output_cpu.sum().backward()
    output_mps.sum().backward()
    self.assertEqual(input.grad, input_mps.grad.to('cpu'))
def _nll_loss_1d_helper(self, input_size, reduction):
    # Compare unweighted 1D nll_loss (scalar target) between CPU and MPS.
    # CPU
    input = torch.rand(input_size, requires_grad=True, device='cpu')
    num_channels = input_size[0]
    target = torch.randint(num_channels, [], device='cpu')
    # MPS copies of the same data.
    input_mps = input.detach().clone().to('mps').requires_grad_()
    target_mps = target.detach().clone().to('mps')
    output_cpu = F.nll_loss(input, target, reduction=reduction)
    output_mps = F.nll_loss(input_mps, target_mps, reduction=reduction)
    self.assertEqual(output_cpu, output_mps.to('cpu'))
    # Gradients must match as well.
    output_cpu.sum().backward()
    output_mps.sum().backward()
    self.assertEqual(input.grad, input_mps.grad.to('cpu'))
def test_nll_loss_1d(self, device='cpu'):
    # 1D input with scalar target, for every reduction mode.
    self._nll_loss_1d_helper([10], "none")
    self._nll_loss_1d_helper([10], "mean")
    self._nll_loss_1d_helper([10], "sum")
def test_nll_loss_empty_tensor_reduction_none(self, device='cpu'):
    # Degenerate/empty shapes with reduction='none'; the 'expected' args
    # mirror the upstream CPU test (unused by the helper -- see its NOTE).
    self._nll_loss_helper([1, 3], "none", torch.empty([0], device=device))
    self._nll_loss_helper([3, 5, 7], "none", torch.empty([5, 7], device=device))
    self._nll_loss_helper([2, 3, 1, 7], "none", torch.empty([2, 1, 7], device=device))
    self._nll_loss_helper([2, 3, 5, 1], "none", torch.empty([2, 5, 1], device=device))
    self._nll_loss_helper([2, 3, 5, 7, 1], "none", torch.empty([2, 5, 7, 1], device=device))
def test_nll_loss_empty_tensor_reduction_mean(self, device='cpu'):
    # Mean over degenerate shapes is NaN (0/0); the expected value passed
    # here mirrors the upstream CPU test (unused by the helper).
    nan = torch.tensor(float('nan'), device=device)
    self._nll_loss_helper([1, 3], "mean", nan)
    self._nll_loss_helper([1, 3, 5, 7], "mean", nan)
    self._nll_loss_helper([2, 3, 1, 7], "mean", nan)
    self._nll_loss_helper([2, 3, 5, 1], "mean", nan)
    self._nll_loss_helper([2, 3, 5, 7, 1], "mean", nan)
def test_nll_loss_empty_tensor_reduction_sum(self, device='cpu'):
    # Sum over degenerate shapes is 0; the expected value passed here
    # mirrors the upstream CPU test (unused by the helper).
    zero = torch.tensor(0, device=device)
    self._nll_loss_helper([1, 3], "sum", zero)
    self._nll_loss_helper([1, 3, 5, 7], "sum", zero)
    self._nll_loss_helper([2, 3, 1, 7], "sum", zero)
    self._nll_loss_helper([2, 3, 5, 1], "sum", zero)
    self._nll_loss_helper([2, 3, 5, 7, 1], "sum", zero)
def test_nll_loss_byte_target_matches_long(self, device='cpu'):
    """uint8 class-index targets must behave like int64 targets on both
    CPU and MPS, for every reduction mode (loss and input gradient)."""
    N, C = 10, 4
    input = torch.randn(N, C, device=device, requires_grad=True)
    target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)

    def compute_result_and_gradient(reduction, target_dtype):
        # Evaluate loss and input-gradient on both devices for one dtype.
        result, grad = {}, {}
        for dev in ['cpu', 'mps']:
            input_dev = input.to(dev)
            input_ = input_dev.detach()
            input_.requires_grad_()
            target_dev = target.to(dev)
            prob = F.log_softmax(input_, dim=-1)
            loss = nn.NLLLoss(reduction=reduction)
            result[dev] = loss(prob, target_dev.to(target_dtype))
            result[dev].sum().backward()
            grad[dev] = input_.grad
        return result, grad

    for reduction in ["none", "mean", "sum"]:
        result_long, grad_long = compute_result_and_gradient(reduction, torch.long)
        result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)
        self.assertEqual(result_long['mps'].to('cpu'), result_long['cpu'])
        self.assertEqual(grad_long['mps'].to('cpu'), grad_long['cpu'])
        # Fix: the byte-target results were computed but never checked, so
        # the test did not verify what its name claims.
        self.assertEqual(result_byte['mps'].to('cpu'), result_byte['cpu'])
        self.assertEqual(grad_byte['mps'].to('cpu'), grad_byte['cpu'])
def test_nll_loss_backward(self):
    # Copy-n-pasted from similar test_torchinductor.py test
    # Used to crash with `error: 'mps.divide' op requires the same element type for all operands and results`
    labels = (
        torch.zeros([5], dtype=torch.int64, device="mps"),
        torch.tensor([-100, -100, 3, -100, -100], dtype=torch.int64, device="mps"),
    )
    for label in labels:
        inp = torch.rand(5, 5, device="mps", dtype=torch.half)
        grad_out = torch.empty((), device=inp.device, dtype=inp.dtype)
        total_weight = torch.tensor(1.0, device=inp.device)
        # Smoke test: the backward kernel must not raise (no value checks).
        torch.ops.aten.nll_loss_backward(grad_out, inp, label, None, 1, -100, total_weight)
| TestNLLLoss |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 97919,
"end": 99006
} | class ____(Response):
"""
Response of events.scalar_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "scalar_metrics_iter_histogram"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
    # Extra kwargs are forwarded to the base Response.
    super(ScalarMetricsIterHistogramResponse, self).__init__(**kwargs)
    # Assignment goes through the validating property setter below.
    self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
    # None clears the field; otherwise require a list/tuple whose elements
    # are all plain dicts.
    if value is None:
        self._property_images = None
        return
    self.assert_isinstance(value, "images", (list, tuple))
    self.assert_isinstance(value, "images", (dict,), is_array=True)
    self._property_images = value
| ScalarMetricsIterHistogramResponse |
python | eventlet__eventlet | tests/debug_test.py | {
"start": 90,
"end": 2860
} | class ____(tests.LimitedTestCase):
def setUp(self):
    # Replace sys.settrace with a recorder so spew()/unspew() can be
    # observed without actually tracing this test process.
    self.orig_trace = sys.settrace
    sys.settrace = self._settrace
    self.tracer = None
def tearDown(self):
    # Restore the real settrace and the stdout that tests replace.
    sys.settrace = self.orig_trace
    sys.stdout = sys.__stdout__
def _settrace(self, cb):
    # Stand-in for sys.settrace: just remember the installed callback.
    self.tracer = cb
def test_spew(self):
    # spew() must install a debug.Spew instance as the trace function.
    debug.spew()
    assert isinstance(self.tracer, debug.Spew)
def test_unspew(self):
    # unspew() must remove the previously installed tracer.
    debug.spew()
    debug.unspew()
    assert self.tracer is None
def test_line(self):
    # A 'line' event must print module:lineno plus local values
    # (including the frame local 'f').
    frame_str = "f=<frame at"
    sys.stdout = io.StringIO()
    s = debug.Spew()
    f = sys._getframe()
    s(f, "line", None)
    lineno = f.f_lineno - 1  # -1 here since we called with frame f in the line above
    output = sys.stdout.getvalue()
    assert "%s:%i" % (__name__, lineno) in output, "Didn't find line %i in %s" % (lineno, output)
    assert frame_str in output
def test_line_nofile(self):
    # Without __file__ in globals the tracer must fall back to "[unknown]".
    sys.stdout = io.StringIO()
    s = debug.Spew()
    g = globals().copy()
    del g['__file__']
    f = eval("sys._getframe()", g)
    lineno = f.f_lineno
    s(f, "line", None)
    output = sys.stdout.getvalue()
    assert "[unknown]:%i" % lineno in output, "Didn't find [unknown]:%i in %s" % (lineno, output)
    if "PYTEST_XDIST_WORKER" not in os.environ:
        # presumably the instruction counter is unreliable under
        # pytest-xdist -- confirm why this is skipped there.
        assert "VM instruction #" in output, output
def test_line_global(self):
    # Module-level names referenced by the frame should be printed too.
    frame_str = "f=<frame at"
    global GLOBAL_VAR
    sys.stdout = io.StringIO()
    GLOBAL_VAR = debug.Spew()
    f = sys._getframe()
    GLOBAL_VAR(f, "line", None)
    lineno = f.f_lineno - 1  # -1 here since we called with frame f in the line above
    output = sys.stdout.getvalue()
    assert "%s:%i" % (__name__, lineno) in output, "Didn't find line %i in %s" % (lineno, output)
    assert frame_str in output
    assert "GLOBAL_VAR" in f.f_globals
    assert "GLOBAL_VAR=<eventlet.debug.Spew object at" in output
    # Clean up the module global so other tests don't see it.
    del GLOBAL_VAR
def test_line_novalue(self):
    # With show_values=False the local values must not be printed.
    sys.stdout = io.StringIO()
    s = debug.Spew(show_values=False)
    f = sys._getframe()
    s(f, "line", None)
    lineno = f.f_lineno - 1  # -1 here since we called with frame f in the line above
    output = sys.stdout.getvalue()
    assert "%s:%i" % (__name__, lineno) in output, "Didn't find line %i in %s" % (lineno, output)
    # NOTE(review): other tests match "f=<frame at"; this checks the older
    # "f=<frame object at" repr and may pass vacuously -- confirm.
    assert "f=<frame object at" not in output
def test_line_nooutput(self):
    # A trace_names filter that matches nothing must produce no output.
    sys.stdout = io.StringIO()
    s = debug.Spew(trace_names=['foo'])
    f = sys._getframe()
    s(f, "line", None)
    output = sys.stdout.getvalue()
    assert output == ""
| TestSpew |
python | ray-project__ray | python/ray/llm/_internal/serve/engines/vllm/vllm_models.py | {
"start": 2080,
"end": 12325
} | class ____(BaseModelExtended):
# Pydantic config: store enum fields by value (e.g. GPUType -> str).
model_config = ConfigDict(
    use_enum_values=True,
)

model_id: str = Field(
    description="The identifier for the model. This is the id that will be used to query the model.",
)
hf_model_id: Optional[str] = Field(
    None, description="The Hugging Face model identifier."
)
mirror_config: Optional[CloudMirrorConfig] = Field(
    None,
    description="Configuration for cloud storage mirror. This is for where the weights are downloaded from.",
)
accelerator_type: Optional[GPUType] = Field(
    None,
    description="The type of accelerator to use. This is used to determine the placement group strategy.",
)
placement_group_config: Optional[Dict[str, Any]] = Field(
    default=None,
    description=(
        "Ray placement group configuration for scheduling vLLM engine workers. "
        "Defines resource bundles and placement strategy for multi-node deployments. "
        "Defaults to PACK strategy with automatic bundle generation based on TP/PP sizes."
    ),
)
@field_validator("placement_group_config")
@classmethod
def validate_placement_group_config(cls, value):
if value is None:
return None
# Validate through PlacementGroupConfig, then dump back to dict
validated = PlacementGroupConfig(**value)
return validated.model_dump()
# Ray runtime environment for the engine workers; its "env_vars" entry is
# populated by get_runtime_env_with_local_env_vars().
runtime_env: Optional[Dict[str, Any]] = None
# User-provided kwargs, split between vLLM engine and frontend args by
# from_llm_config().
engine_kwargs: Dict[str, Any] = {}
frontend_kwargs: Dict[str, Any] = {}
@property
def actual_hf_model_id(self) -> str:
    # Fall back to the serving model_id when no explicit HF id was given.
    return self.hf_model_id or self.model_id
@property
def trust_remote_code(self) -> bool:
    # Mirrors the trust_remote_code engine kwarg; defaults to False.
    return self.engine_kwargs.get("trust_remote_code", False)
def get_initialization_kwargs(self) -> dict:
    """
    Get kwargs that will be actually passed to the LLMInitializer
    constructor.
    """
    engine_kwargs = self.engine_kwargs.copy()

    # 'model' and 'served_model_name' are derived from the LLMConfig and
    # must not be supplied a second time through engine_kwargs.
    if "model" in engine_kwargs or "served_model_name" in engine_kwargs:
        raise ValueError(
            "model or served_model_name is not allowed in engine_kwargs when using Ray Serve LLM. Please use `model_loading_config` in LLMConfig instead."
        )
    engine_kwargs["model"] = self.actual_hf_model_id
    engine_kwargs["served_model_name"] = [self.model_id]

    # Only the Ray executor backend is supported; default it when absent.
    if (
        "distributed_executor_backend" in engine_kwargs
        and engine_kwargs["distributed_executor_backend"] != "ray"
    ):
        raise ValueError(
            "distributed_executor_backend != 'ray' is not allowed in engine_kwargs when using Ray Serve LLM Configs."
        )
    else:
        engine_kwargs["distributed_executor_backend"] = "ray"

    # TODO (Nikhil): Remove this once vLLM fully deprecates disable_log_requests.
    if "disable_log_requests" in engine_kwargs:
        logger.warning(
            "disable_log_requests is set in engine_kwargs, but vLLM "
            "does not support it. Converting to enable_log_requests."
        )
        # Invert the deprecated flag into its replacement.
        engine_kwargs["enable_log_requests"] = not engine_kwargs.pop(
            "disable_log_requests"
        )
    elif "enable_log_requests" not in engine_kwargs:
        engine_kwargs["enable_log_requests"] = False
    return engine_kwargs
def get_runtime_env_with_local_env_vars(self) -> dict:
    """Return the runtime_env with whitelisted local env vars copied in.

    Note: when self.runtime_env is set, it is updated in place.
    """
    env = self.runtime_env or {}
    env.setdefault("env_vars", {})
    # Propagate whitelisted env vars from this process into the runtime env.
    target = env["env_vars"]
    for name in ENV_VARS_TO_PROPAGATE:
        if name in os.environ:
            target[name] = os.getenv(name)
    return env
@classmethod
def from_llm_config(cls, llm_config: LLMConfig) -> "VLLMEngineConfig":
    """Converts the LLMConfig to a VLLMEngineConfig."""
    # Set up the model downloading configuration.
    hf_model_id, mirror_config = None, None
    if llm_config.model_loading_config.model_source is None:
        hf_model_id = llm_config.model_id
    elif isinstance(llm_config.model_loading_config.model_source, str):
        hf_model_id = llm_config.model_loading_config.model_source
    else:
        # If it's a CloudMirrorConfig (or subtype)
        mirror_config = llm_config.model_loading_config.model_source

    # Split user kwargs between vLLM's FrontendArgs and AsyncEngineArgs;
    # unknown keys are rejected outright.
    all_engine_kwargs = llm_config.engine_kwargs.copy()
    engine_kwargs = {}
    frontend_kwargs = {}

    # Get field names from dataclasses
    frontend_field_names = {
        field.name for field in dataclasses.fields(FrontendArgs)
    }
    async_engine_field_names = {
        field.name for field in dataclasses.fields(AsyncEngineArgs)
    }
    # NOTE: keys present in both sets land in frontend_kwargs (checked first).
    for key, value in all_engine_kwargs.items():
        if key in frontend_field_names:
            frontend_kwargs[key] = value
        elif key in async_engine_field_names:
            engine_kwargs[key] = value
        else:
            raise ValueError(f"Unknown engine argument: {key}")

    # placement_group_config is already validated and stored as dict in LLMConfig
    placement_group_config = llm_config.placement_group_config
    return VLLMEngineConfig(
        model_id=llm_config.model_id,
        hf_model_id=hf_model_id,
        mirror_config=mirror_config,
        accelerator_type=llm_config.accelerator_type,
        engine_kwargs=engine_kwargs,
        frontend_kwargs=frontend_kwargs,
        runtime_env=llm_config.runtime_env,
        placement_group_config=placement_group_config,
    )
def ray_accelerator_type(self) -> str:
    """Converts the accelerator type to the Ray Core format."""
    # e.g. "accelerator_type:A100" (accelerator_type is stored as a plain
    # value thanks to use_enum_values in model_config).
    return f"accelerator_type:{self.accelerator_type}"
@property
def tensor_parallel_degree(self) -> int:
    # Defaults to 1 when not set in engine_kwargs.
    return self.engine_kwargs.get("tensor_parallel_size", 1)
@property
def pipeline_parallel_degree(self) -> int:
    # Defaults to 1 when not set in engine_kwargs.
    return self.engine_kwargs.get("pipeline_parallel_size", 1)
@property
def num_devices(self) -> int:
    """Total number of worker devices: TP size times PP size."""
    return self.pipeline_parallel_degree * self.tensor_parallel_degree
@property
def placement_strategy(self) -> str:
    """Placement group strategy: custom config wins, otherwise PACK."""
    # PACK = cross-node best-effort placement; DP deployments override
    # this to STRICT_PACK in the Serve config.
    config = self.placement_group_config
    if not config:
        return "PACK"
    return config.get("strategy", "PACK")
@property
def placement_bundles(self) -> List[Dict[str, float]]:
    if self.placement_group_config:
        # placement_group_config is validated dict; extract bundles
        bundles = []
        for bundle_dict in self.placement_group_config["bundles"]:
            bundle = bundle_dict.copy()
            if self.accelerator_type:
                # Use setdefault to add accelerator hint WITHOUT overriding explicit user values
                bundle.setdefault(self.ray_accelerator_type(), 0.001)
            bundles.append(bundle)
        return bundles

    # Default bundles: GPU-only; replica actor contributes CPU to first bundle via merge
    bundle = {"GPU": 1}
    if self.accelerator_type:
        bundle[self.ray_accelerator_type()] = 0.001
    # One bundle per worker device (TP x PP).
    bundles = [copy.deepcopy(bundle) for _ in range(self.num_devices)]
    return bundles
@property
def use_gpu(self) -> bool:
    """Returns True if vLLM is configured to use GPU resources."""
    # Check placement_group_config bundles for explicit GPU specification
    if self.placement_group_config:
        bundles = self.placement_group_config.get("bundles", [])
        if bundles:
            # If any bundle has GPU > 0, we use GPU
            return any(bundle.get("GPU", 0) > 0 for bundle in bundles)
    # Default behavior based on accelerator_type
    if not self.accelerator_type:
        # By default, GPU resources are used
        return True
    # Otherwise, GPU is used only for accelerator types in this NVIDIA list
    # (anything else is treated as non-GPU).
    return self.accelerator_type in (
        GPUType.NVIDIA_TESLA_V100.value,
        GPUType.NVIDIA_TESLA_P100.value,
        GPUType.NVIDIA_TESLA_T4.value,
        GPUType.NVIDIA_TESLA_P4.value,
        GPUType.NVIDIA_TESLA_K80.value,
        GPUType.NVIDIA_TESLA_A10G.value,
        GPUType.NVIDIA_L4.value,
        GPUType.NVIDIA_L40S.value,
        GPUType.NVIDIA_A100.value,
        GPUType.NVIDIA_H100.value,
        GPUType.NVIDIA_H200.value,
        GPUType.NVIDIA_H20.value,
        GPUType.NVIDIA_A100_40G.value,
        GPUType.NVIDIA_A100_80G.value,
    )
def get_or_create_pg(self) -> PlacementGroup:
    """Get or create a placement group.

    If we are already in a placement group, return the existing placement group.
    Else, create a new placement group based on the scaling config.
    """
    dp_rank = self.engine_kwargs.get("data_parallel_rank", None)
    pg = get_current_placement_group()
    if pg:
        logger.debug(
            "Using existing placement group %s, details: %s",
            pg.id,
            placement_group_table(pg),
        )
    else:
        # Guard against accidental PG creation inside deployments.
        if not ALLOW_NEW_PLACEMENT_GROUPS_IN_DEPLOYMENT:
            raise RuntimeError(
                "Creating new placement groups is not allowed. "
                "Change RAYLLM_ALLOW_NEW_PLACEMENT_GROUPS_IN_DEPLOYMENT "
                "if this is not intended."
            )
        # dp_rank is only used to name the group for debuggability.
        name = "" if dp_rank is None else f"dp_{dp_rank}"
        # Use placement_bundles and placement_strategy properties which handle
        # both custom and default placement group configurations
        pg = placement_group(
            bundles=self.placement_bundles,
            strategy=self.placement_strategy,
            name=name,
        )
        logger.info(f"Using new placement group {pg}. {placement_group_table(pg)}")
    return pg
| VLLMEngineConfig |
python | getsentry__sentry | src/sentry/data_export/processors/issues_by_tag.py | {
"start": 327,
"end": 431
} | class ____(NamedTuple):
# The raw GroupTagValue row.
value: GroupTagValue
# The EventUser this tag value resolved to, or None when no match was found.
eventuser: EventUser | None
| GroupTagValueAndEventUser |
python | kubernetes-client__python | kubernetes/client/api/scheduling_v1_api.py | {
"start": 543,
"end": 95824
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Wrap the scheduling.k8s.io/v1 endpoints around *api_client*,
    creating a default-configured ApiClient when none is given."""
    self.api_client = ApiClient() if api_client is None else api_client
def create_priority_class(self, body, **kwargs):  # noqa: E501
    """Create a PriorityClass.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the call returns a thread whose .get()
    yields the result.

    :param async_req bool: execute request asynchronously
    :param V1PriorityClass body: (required)
    :param str pretty: if 'true', pretty-print the output.
    :param str dry_run: when present, modifications are not persisted
        ('All' processes every dry-run stage).
    :param str field_manager: name of the actor making these changes.
    :param str field_validation: how the server treats unknown/duplicate
        fields in the request body: Ignore, Warn or Strict.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding it.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: V1PriorityClass
        If the method is called asynchronously, returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.create_priority_class_with_http_info(body, **kwargs)  # noqa: E501
def create_priority_class_with_http_info(self, body, **kwargs):  # noqa: E501
    """create_priority_class  # noqa: E501

    create a PriorityClass  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    :param async_req bool: execute request asynchronously
    :param V1PriorityClass body: (required)
    :param str pretty: if 'true', pretty-print the output.
    :param str dry_run: when present, modifications are not persisted
        ('All' processes every dry-run stage).
    :param str field_manager: name of the actor making these changes.
    :param str field_validation: how the server treats unknown/duplicate
        fields in the request body: Ignore, Warn or Strict.
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding it.
    :param _request_timeout: total timeout, or a (connect, read) pair.
    :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """

    local_var_params = locals()

    all_params = [
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        'field_validation'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject any kwarg that is not a recognized parameter.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_priority_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `create_priority_class`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # Only forward query parameters that were actually supplied.
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
    if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
        query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PriorityClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_priority_class(self, **kwargs):  # noqa: E501
    """delete_collection_priority_class  # noqa: E501

    Delete collection of PriorityClass.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_priority_class(async_req=True)
    >>> result = thread.get()

    Accepts the same keyword arguments as
    ``delete_collection_priority_class_with_http_info`` (pretty, _continue,
    dry_run, field_selector, grace_period_seconds,
    ignore_store_read_error_with_cluster_breaking_potential, label_selector,
    limit, orphan_dependents, propagation_policy, resource_version,
    resource_version_match, send_initial_events, timeout_seconds, body,
    plus the generic async_req / _preload_content / _request_timeout
    request-control options).

    :return: V1Status.
        If the method is called asynchronously, returns the request thread.
    """
    # This convenience wrapper returns only the deserialized body; the
    # *_with_http_info variant returns (data, status, headers).
    options = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_priority_class_with_http_info(**options)  # noqa: E501
def delete_collection_priority_class_with_http_info(self, **kwargs):  # noqa: E501
    """delete_collection_priority_class  # noqa: E501

    Delete collection of PriorityClass.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_priority_class_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continue token from a previous paginated list call
    :param str dry_run: when present, modifications are not persisted
    :param str field_selector: restrict returned objects by their fields
    :param int grace_period_seconds: duration in seconds before the object is deleted
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy ('Orphan', 'Background' or 'Foreground')
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resourceVersion is applied to list calls
    :param bool send_initial_events: emit synthetic initial events when watching
    :param int timeout_seconds: timeout for the list/watch call
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    local_var_params = dict(kwargs)

    # Keyword arguments this operation understands, plus the generic
    # request-control options shared by every generated API method.
    all_params = [
        'pretty',
        '_continue',
        'dry_run',
        'field_selector',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'label_selector',
        'limit',
        'orphan_dependents',
        'propagation_policy',
        'resource_version',
        'resource_version_match',
        'send_initial_events',
        'timeout_seconds',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    for key in local_var_params:
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_priority_class" % key
            )

    collection_formats = {}
    path_params = {}

    # Map python option names to their wire-format query keys; the tuple
    # order preserves the query-string emission order of the API spec.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('dry_run', 'dryRun'),
        ('field_selector', 'fieldSelector'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential', 'ignoreStoreReadErrorWithClusterBreakingPotential'),  # noqa: E501
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_priority_class(self, name, **kwargs):  # noqa: E501
    """delete_priority_class  # noqa: E501

    Delete a PriorityClass.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_priority_class(name, async_req=True)
    >>> result = thread.get()

    :param str name: name of the PriorityClass (required)

    Accepts the same keyword arguments as
    ``delete_priority_class_with_http_info`` (pretty, dry_run,
    grace_period_seconds,
    ignore_store_read_error_with_cluster_breaking_potential,
    orphan_dependents, propagation_policy, body, plus the generic
    async_req / _preload_content / _request_timeout request-control
    options).

    :return: V1Status.
        If the method is called asynchronously, returns the request thread.
    """
    # This convenience wrapper returns only the deserialized body; the
    # *_with_http_info variant returns (data, status, headers).
    options = dict(kwargs, _return_http_data_only=True)
    return self.delete_priority_class_with_http_info(name, **options)  # noqa: E501
def delete_priority_class_with_http_info(self, name, **kwargs):  # noqa: E501
    """delete_priority_class  # noqa: E501

    Delete a PriorityClass.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_priority_class_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityClass (required)
    :param str pretty: if 'true', the output is pretty printed
    :param str dry_run: when present, modifications are not persisted
    :param int grace_period_seconds: duration in seconds before the object is deleted
    :param bool ignore_store_read_error_with_cluster_breaking_potential: opt in to unsafe deletion of corrupt objects
    :param bool orphan_dependents: deprecated; use propagation_policy instead
    :param str propagation_policy: garbage-collection policy ('Orphan', 'Background' or 'Foreground')
    :param V1DeleteOptions body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) tuple
    :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Fold the positional `name` into the option dict so every parameter
    # is handled uniformly below.
    local_var_params = dict(kwargs, name=name)

    # Keyword arguments this operation understands, plus the generic
    # request-control options shared by every generated API method.
    all_params = [
        'name',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'ignore_store_read_error_with_cluster_breaking_potential',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    for key in local_var_params:
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_priority_class" % key
            )

    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_priority_class`")  # noqa: E501

    collection_formats = {}
    path_params = {'name': local_var_params['name']}  # noqa: E501

    # Map python option names to their wire-format query keys; the tuple
    # order preserves the query-string emission order of the API spec.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('ignore_store_read_error_with_cluster_breaking_potential', 'ignoreStoreReadErrorWithClusterBreakingPotential'),  # noqa: E501
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1Status',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_api_resources(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Return the resources available in the scheduling.k8s.io/v1 API
    group.  Synchronous by default; pass ``async_req=True`` to perform
    the request asynchronously and receive the request thread instead.

    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse object without reading/decoding the
        response data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: V1APIResourceList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only want the payload, not
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501
def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
    """get_api_resources  # noqa: E501

    Get the available resources in the scheduling.k8s.io/v1 API group.
    Synchronous by default; pass ``async_req=True`` to perform the
    request asynchronously and receive the request thread instead.

    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied
    """
    local_var_params = locals()

    # This endpoint takes no API-level parameters; only the generic
    # transport options are accepted.
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # dict.items() iterates key/value pairs on both Python 2 and 3;
    # the third-party `six.iteritems` indirection is unnecessary.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1APIResourceList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_priority_class(self, **kwargs):  # noqa: E501
    """list_priority_class  # noqa: E501

    List or watch objects of kind PriorityClass.  Synchronous by
    default; pass ``async_req=True`` to perform the request
    asynchronously and receive the request thread instead.

    >>> thread = api.list_priority_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output if 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token returned by a previous
        paginated list call
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of items to return per list call
    :param str resource_version: resource-version constraint for the
        list; see the Kubernetes API concepts documentation
    :param str resource_version_match: how resource_version is applied
        to the list call
    :param bool send_initial_events: with ``watch=True``, begin the
        stream with synthetic events describing the current state
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead
        of returning a single list
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse object without reading/decoding the
        response data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: V1PriorityClassList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers get the payload only, not the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.list_priority_class_with_http_info(**kwargs)  # noqa: E501
def list_priority_class_with_http_info(self, **kwargs):  # noqa: E501
    """list_priority_class  # noqa: E501

    List or watch objects of kind PriorityClass.  Synchronous by
    default; pass ``async_req=True`` to perform the request
    asynchronously and receive the request thread instead.

    >>> thread = api.list_priority_class_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: pretty-print the output if 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events
    :param str _continue: continuation token returned by a previous
        paginated list call
    :param str field_selector: restrict results by object fields
    :param str label_selector: restrict results by object labels
    :param int limit: maximum number of items to return per list call
    :param str resource_version: resource-version constraint for the
        list; see the Kubernetes API concepts documentation
    :param str resource_version_match: how resource_version is applied
        to the list call
    :param bool send_initial_events: with ``watch=True``, begin the
        stream with synthetic events describing the current state
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead
        of returning a single list
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: tuple(V1PriorityClassList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied
    """
    local_var_params = locals()

    # Python argument name -> wire-format query parameter name.  The
    # order of this table fixes the order of the query string, which
    # matches the previous hand-unrolled implementation.
    query_param_names = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('send_initial_events', 'sendInitialEvents'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]

    all_params = [py_name for py_name, _ in query_param_names]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # dict.items() iterates key/value pairs on both Python 2 and 3;
    # the third-party `six.iteritems` indirection is unnecessary.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_priority_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Only parameters that were actually supplied (and are not None)
    # make it onto the query string, exactly as before.
    query_params = []
    for py_name, wire_name in query_param_names:
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PriorityClassList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_priority_class(self, name, body, **kwargs):  # noqa: E501
    """patch_priority_class  # noqa: E501

    Partially update the specified PriorityClass.  Synchronous by
    default; pass ``async_req=True`` to perform the request
    asynchronously and receive the request thread instead.

    >>> thread = api.patch_priority_class(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityClass (required)
    :param object body: the patch document (required)
    :param str pretty: pretty-print the output if 'true'
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All'
    :param str field_manager: name associated with the actor making
        these changes; required for apply-patch requests
    :param str field_validation: how the server handles unknown or
        duplicate fields in the patch ('Ignore', 'Warn' or 'Strict')
    :param bool force: "force" Apply requests, re-acquiring fields
        owned by other managers; must be unset for non-apply patches
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse object without reading/decoding the
        response data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: V1PriorityClass
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers get the payload only, not the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.patch_priority_class_with_http_info(name, body, **kwargs)  # noqa: E501
def patch_priority_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
    """patch_priority_class  # noqa: E501

    Partially update the specified PriorityClass.  Synchronous by
    default; pass ``async_req=True`` to perform the request
    asynchronously and receive the request thread instead.

    >>> thread = api.patch_priority_class_with_http_info(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityClass (required)
    :param object body: the patch document (required)
    :param str pretty: pretty-print the output if 'true'
    :param str dry_run: when present, modifications are not persisted;
        the only valid value is 'All'
    :param str field_manager: name associated with the actor making
        these changes; required for apply-patch requests
    :param str field_validation: how the server handles unknown or
        duplicate fields in the patch ('Ignore', 'Warn' or 'Strict')
    :param bool force: "force" Apply requests, re-acquiring fields
        owned by other managers; must be unset for non-apply patches
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple of timeouts.
    :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied
    :raises ApiValueError: if a required parameter is missing
    """
    local_var_params = locals()

    # Python argument name -> wire-format query parameter name; table
    # order fixes the query-string order, matching the previous
    # hand-unrolled implementation.
    query_param_names = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
        ('field_validation', 'fieldValidation'),
        ('force', 'force'),
    ]

    all_params = [
        'name',
        'body',
    ]
    all_params.extend(py_name for py_name, _ in query_param_names)
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # dict.items() iterates key/value pairs on both Python 2 and 3;
    # the third-party `six.iteritems` indirection is unnecessary.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_priority_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `patch_priority_class`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `patch_priority_class`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    # Only parameters that were actually supplied (and are not None)
    # make it onto the query string, exactly as before.
    query_params = []
    for py_name, wire_name in query_param_names:
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1PriorityClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_priority_class(self, name, **kwargs):  # noqa: E501
    """read_priority_class  # noqa: E501

    Read the specified PriorityClass.  Synchronous by default; pass
    ``async_req=True`` to perform the request asynchronously and
    receive the request thread instead.

    >>> thread = api.read_priority_class(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the PriorityClass (required)
    :param str pretty: pretty-print the output if 'true'
    :param _preload_content: if False, return the raw
        urllib3.HTTPResponse object without reading/decoding the
        response data. Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple of timeouts.
    :return: V1PriorityClass
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers get the payload only, not the
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.read_priority_class_with_http_info(name, **kwargs)  # noqa: E501
    def read_priority_class_with_http_info(self, name, **kwargs):  # noqa: E501
        """read_priority_class  # noqa: E501

        read the specified PriorityClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.read_priority_class_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PriorityClass (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local exists, so it holds
        # exactly {self, name, kwargs}; kwargs is merged in (and removed) below.
        local_var_params = locals()

        all_params = [
            'name',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument not declared above, then flatten the
        # accepted ones into local_var_params for uniform access.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_priority_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `read_priority_class`")  # noqa: E501

        collection_formats = {}

        # 'name' is substituted into the {name} placeholder of the URL path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP GET (and async handling) to the shared client.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PriorityClass',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def replace_priority_class(self, name, body, **kwargs): # noqa: E501
"""replace_priority_class # noqa: E501
replace the specified PriorityClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the PriorityClass (required)
:param V1PriorityClass body: (required)
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1PriorityClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_priority_class_with_http_info(name, body, **kwargs) # noqa: E501
    def replace_priority_class_with_http_info(self, name, body, **kwargs):  # noqa: E501
        """replace_priority_class  # noqa: E501

        replace the specified PriorityClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.replace_priority_class_with_http_info(name, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the PriorityClass (required)
        :param V1PriorityClass body: (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1PriorityClass, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local exists, so it holds
        # exactly {self, name, body, kwargs}; kwargs is merged in (and removed) below.
        local_var_params = locals()

        all_params = [
            'name',
            'body',
            'pretty',
            'dry_run',
            'field_manager',
            'field_validation'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument not declared above, then flatten the
        # accepted ones into local_var_params for uniform access.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_priority_class" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'name' is set
        if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                       local_var_params['name'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `name` when calling `replace_priority_class`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `replace_priority_class`")  # noqa: E501

        collection_formats = {}

        # 'name' is substituted into the {name} placeholder of the URL path.
        path_params = {}
        if 'name' in local_var_params:
            path_params['name'] = local_var_params['name']  # noqa: E501

        # Optional query parameters are only sent when explicitly provided.
        query_params = []
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1PriorityClass object is serialized as the request body.
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # Delegate the actual HTTP PUT (and async handling) to the shared client.
        return self.api_client.call_api(
            '/apis/scheduling.k8s.io/v1/priorityclasses/{name}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1PriorityClass',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| SchedulingV1Api |
python | pytorch__pytorch | torch/cuda/memory.py | {
"start": 1658,
"end": 48588
} | class ____(TypedDict):
"""Memory snapshot structure."""
segments: list[_Segment]
device_traces: NotRequired[list[list[_TraceEntry]]]
# Public API of this module; names not listed here are internal helpers.
__all__ = [
    "caching_allocator_alloc",
    "caching_allocator_delete",
    "caching_allocator_enable",
    "get_per_process_memory_fraction",
    "set_per_process_memory_fraction",
    "empty_cache",
    "memory_stats",
    "memory_stats_as_nested_dict",
    "reset_accumulated_memory_stats",
    "reset_peak_memory_stats",
    "reset_max_memory_allocated",
    "reset_max_memory_cached",
    "host_memory_stats",
    "host_memory_stats_as_nested_dict",
    "reset_accumulated_host_memory_stats",
    "reset_peak_host_memory_stats",
    "memory_allocated",
    "max_memory_allocated",
    "memory_reserved",
    "max_memory_reserved",
    "memory_cached",
    "max_memory_cached",
    "memory_snapshot",
    "memory_summary",
    "list_gpu_processes",
    "mem_get_info",
    "get_allocator_backend",
    "CUDAPluggableAllocator",
    "change_current_allocator",
    "MemPool",
    "use_mem_pool",
]
# When the torch._C extension lacks the CUDA allocator symbols (presumably a
# build without CUDA support — confirm against _dummy_type's definition), we
# install placeholder types so that the imports below always succeed.
if not hasattr(torch._C, "_cuda_CUDAAllocator"):
    # Define dummy base classes
    torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator")

if not hasattr(torch._C, "_MemPool"):
    # Define dummy base classes
    torch._C.__dict__["_MemPool"] = _dummy_type("_MemPool")
    torch._C.__dict__["_cuda_beginAllocateToPool"] = _dummy_type(
        "_cuda_beginAllocateToPool"
    )
    torch._C.__dict__["_cuda_beginAllocateCurrentThreadToPool"] = _dummy_type(
        "_cuda_beginAllocateCurrentThreadToPool"
    )
    torch._C.__dict__["_cuda_endAllocateToPool"] = _dummy_type(
        "_cuda_endAllocateToPool"
    )
    torch._C.__dict__["_cuda_releasePool"] = _dummy_type("_cuda_releasePool")

# Re-exported here (real bindings or the dummies installed above).
from torch._C import (  # noqa: F401
    _cuda_beginAllocateCurrentThreadToPool,
    _cuda_beginAllocateToPool,
    _cuda_CUDAAllocator,
    _cuda_endAllocateToPool,
    _cuda_releasePool,
    _MemPool,
)
def _host_allocator():
    """Return the CUDA host (pinned) memory allocator object from torch._C."""
    _lazy_init()  # CUDA state must be initialized before querying the allocator
    return torch._C._cuda_cudaHostAllocator()
@contextlib.contextmanager
def _free_mutex():
    """Context manager that holds the CUDA free mutex for the enclosed block."""
    torch._C._cuda_lock_mutex()
    try:
        yield
    finally:
        # Always release, even if the body raises.
        torch._C._cuda_unlock_mutex()
def caching_allocator_alloc(size, device: "Device" = None, stream=None):
    r"""Perform a memory allocation using the CUDA memory allocator.

    Memory is allocated for a given device and a stream, this
    function is intended to be used for interoperability with other
    frameworks. Allocated memory is released through
    :func:`~torch.cuda.caching_allocator_delete`.

    Args:
        size (int): number of bytes to be allocated.
        device (torch.device or int, optional): selected device. If it is
            ``None`` the default CUDA device is used.
        stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then
            the default stream for the selected device is used.

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    if device is None:
        device = torch.cuda.current_device()
    device = _get_device_index(device)
    if stream is None:
        stream = torch.cuda.current_stream(device)
    # A Stream object is reduced to its raw cudaStream_t handle; anything that
    # is not an int at this point cannot name a stream.
    if isinstance(stream, torch.cuda.streams.Stream):
        stream = stream.cuda_stream
    if not isinstance(stream, int):
        # Fixed grammar in the user-facing message ("a existing" -> "an existing").
        raise TypeError(
            "Invalid type for stream argument, must be "
            "`torch.cuda.Stream` or `int` representing a pointer "
            "to an existing stream"
        )
    # The raw allocation must happen with the target device current.
    with torch.cuda.device(device):
        return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
def caching_allocator_delete(mem_ptr):
    r"""Delete memory allocated using the CUDA memory allocator.

    Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`.
    is freed here. The associated device and stream are tracked inside
    the allocator.

    Args:
        mem_ptr (int): memory address to be freed by the allocator.

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    # No device/stream arguments: the allocator resolves them from mem_ptr.
    torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
def caching_allocator_enable(value: bool = True) -> None:
    r"""Enable or disable the CUDA memory allocator. On by default."""
    # Silently a no-op when CUDA has not been initialized yet.
    if is_initialized():
        torch._C._cuda_cudaCachingAllocator_enable(value)
def set_per_process_memory_fraction(fraction, device: "Device" = None) -> None:
    r"""Set memory fraction for a process.

    The fraction is used to limit an caching allocator to allocated memory on a CUDA device.
    The allowed value equals the total visible memory multiplied fraction.
    If trying to allocate more than the allowed value in a process, will raise an out of
    memory error in allocator.

    Args:
        fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction.
        device (torch.device or int, optional): selected device. If it is
            ``None`` the default CUDA device is used.

    .. note::
        In general, the total available free memory is less than the total capacity.
    """
    _lazy_init()
    # Resolve the target device, defaulting to the current one.
    target = torch.cuda.current_device() if device is None else device
    target = _get_device_index(target)
    if not isinstance(fraction, float):
        raise TypeError("Invalid type for fraction argument, must be `float`")
    if fraction < 0 or fraction > 1:
        raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1")
    torch._C._cuda_setMemoryFraction(fraction, target)
def get_per_process_memory_fraction(device: "Device" = None) -> float:
    r"""Get memory fraction for a process.

    Args:
        device (torch.device or int, optional): selected device. If it is
            ``None`` the default CUDA device is used.

    Returns:
        memory fraction, in range 0~1. Allowed memory equals total_memory * fraction.
    """
    _lazy_init()
    # Resolve the target device, defaulting to the current one.
    target = torch.cuda.current_device() if device is None else device
    return torch._C._cuda_getMemoryFraction(_get_device_index(target))
def empty_cache() -> None:
    r"""Release all unoccupied cached memory currently held by the caching
    allocator so that those can be used in other GPU application and visible in
    `nvidia-smi`.

    .. note::
        :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
        memory available for PyTorch. However, it may help reduce fragmentation
        of GPU memory in certain cases. See :ref:`cuda-memory-management` for
        more details about GPU memory management.
    """
    # Guarded so calling this never triggers CUDA initialization: if CUDA was
    # never initialized there is nothing cached to release.
    if is_initialized():
        torch._C._cuda_emptyCache()
def memory_stats(device: "Device" = None) -> dict[str, Any]:
    r"""Return a dictionary of CUDA memory allocator statistics for a given device.

    The return value of this function is a dictionary of statistics, each of
    which is a non-negative integer.

    Core statistics:

    - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of allocation requests received by the memory allocator.
    - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of allocated memory.
    - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of reserved segments from ``cudaMalloc()``.
    - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of reserved memory.
    - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of active memory blocks.
    - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of active memory.
    - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of inactive, non-releasable memory blocks.
    - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of inactive, non-releasable memory.

    For these core statistics, values are broken down as follows.

    Pool type:

    - ``all``: combined statistics across all memory pools.
    - ``large_pool``: statistics for the large allocation pool
      (as of June 2025, for size >= 1MB allocations).
    - ``small_pool``: statistics for the small allocation pool
      (as of June 2025, for size < 1MB allocations).

    Metric type:

    - ``current``: current value of this metric.
    - ``peak``: maximum value of this metric.
    - ``allocated``: historical total increase in this metric.
    - ``freed``: historical total decrease in this metric.

    In addition to the core statistics, we also provide some simple event
    counters:

    - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
      result in a cache flush and retry.
    - ``"num_ooms"``: number of out-of-memory errors thrown.
    - ``"num_sync_all_streams"``: number of ``synchronize_and_free_events`` calls.
    - ``"num_device_alloc"``: number of CUDA allocation calls. This includes both
      cuMemMap and cudaMalloc.
    - ``"num_device_free"``: number of CUDA free calls. This includes both cuMemUnmap
      and cudaFree.

    The caching allocator can be configured via ENV to not split blocks larger than a
    defined size (see Memory Management section of the Cuda Semantics documentation).
    This helps avoid memory fragmentation but may have a performance
    penalty. Additional outputs to assist with tuning and evaluating impact:

    - ``"max_split_size"``: blocks above this size will not be split.
    - ``"oversize_allocations.{current,peak,allocated,freed}"``:
      number of over-size allocation requests received by the memory allocator.
    - ``"oversize_segments.{current,peak,allocated,freed}"``:
      number of over-size reserved segments from ``cudaMalloc()``.

    The caching allocator can be configured via ENV to round memory allocations in order
    to reduce fragmentation. Sometimes the overhead from rounding can be higher than
    the fragmentation it helps reduce. The following stat can be used to check if
    rounding adds too much overhead:

    - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      memory requested by client code, compare this with allocated_bytes to check if
      allocation rounding adds too much overhead.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistics for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.

    .. note::
        With :ref:`backend:cudaMallocAsync<cuda-memory-envvars>`, some stats are not
        meaningful, and are always reported as zero.
    """
    flat: list = []

    def _flatten(prefix, node):
        # Dict nodes recurse with a dot-joined prefix; leaves are recorded as-is.
        if not isinstance(node, dict):
            flat.append((prefix, node))
            return
        joined = prefix + "." if len(prefix) > 0 else prefix
        for key, value in node.items():
            _flatten(joined + key, value)

    _flatten("", memory_stats_as_nested_dict(device=device))
    flat.sort()
    return collections.OrderedDict(flat)
def memory_stats_as_nested_dict(device: "Device" = None) -> dict[str, Any]:
    r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
    if is_initialized():
        return torch._C._cuda_memoryStats(_get_device_index(device, optional=True))
    # Nothing to report before CUDA is initialized.
    return {}
def reset_accumulated_memory_stats(device: "Device" = None) -> None:
    r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator.

    See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
    the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
    `"num_alloc_retries"` and `"num_ooms"`.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    target = _get_device_index(device, optional=True)
    return torch._C._cuda_resetAccumulatedMemoryStats(target)
def reset_peak_memory_stats(device: "Device" = None) -> None:
    r"""Reset the "peak" stats tracked by the CUDA memory allocator.

    See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
    `"peak"` key in each individual stat dict.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    target = _get_device_index(device, optional=True)
    return torch._C._cuda_resetPeakMemoryStats(target)
def host_memory_stats() -> dict[str, Any]:
    r"""Return a dictionary of CUDA memory allocator statistics for a given device.

    The return value of this function is a dictionary of statistics, each of
    which is a non-negative integer.

    Core statistics:

    - ``"allocated.{current,peak,allocated,freed}"``:
      number of allocation requests received by the memory allocator.
    - ``"allocated_bytes.{current,peak,allocated,freed}"``:
      amount of allocated memory.
    - ``"segment.{current,peak,allocated,freed}"``:
      number of reserved segments from ``cudaMalloc()``.
    - ``"reserved_bytes.{current,peak,allocated,freed}"``:
      amount of reserved memory.

    For these core statistics, values are broken down as follows.

    Metric type:

    - ``current``: current value of this metric.
    - ``peak``: maximum value of this metric.
    - ``allocated``: historical total increase in this metric.
    - ``freed``: historical total decrease in this metric.

    In addition to the core statistics, we also provide some simple event
    counters:

    - ``"num_host_alloc"``: number of CUDA allocation calls. This includes both
      cudaHostAlloc and cudaHostRegister.
    - ``"num_host_free"``: number of CUDA free calls. This includes both cudaHostFree
      and cudaHostUnregister.

    Finally, we also provide some simple timing counters:

    - ``"host_alloc_time.{total,max,min,count,avg}"``:
      timing of allocation requests going through CUDA calls.
    - ``"host_free_time.{total,max,min,count,avg}"``:
      timing of free requests going through CUDA calls.

    For these timing statistics, values are broken down as follows.

    Metric type:

    - ``total``: total time spent.
    - ``max``: maximum value per call.
    - ``min``: minimum value per call.
    - ``count``: number of times it was called.
    - ``avg``: average time per call.
    """
    flat: list = []

    def _flatten(prefix, node):
        # Dict nodes recurse with a dot-joined prefix; leaves are recorded as-is.
        if not isinstance(node, dict):
            flat.append((prefix, node))
            return
        joined = prefix + "." if len(prefix) > 0 else prefix
        for key, value in node.items():
            _flatten(joined + key, value)

    _flatten("", host_memory_stats_as_nested_dict())
    flat.sort()
    return collections.OrderedDict(flat)
def host_memory_stats_as_nested_dict() -> dict[str, Any]:
    r"""Return the result of :func:`~torch.cuda.host_memory_stats` as a nested dictionary."""
    if is_initialized():
        return torch._C._cuda_hostMemoryStats()
    # Nothing to report before CUDA is initialized.
    return {}
def reset_accumulated_host_memory_stats() -> None:
    r"""Reset the "accumulated" (historical) stats tracked by the host memory allocator.

    See :func:`~torch.cuda.host_memory_stats` for details. Accumulated stats correspond to
    the `"allocated"` and `"freed"` keys in each individual stat dict.
    """
    # Host-side allocator stats are global, so there is no device argument.
    return torch._C._cuda_resetAccumulatedHostMemoryStats()
def reset_peak_host_memory_stats() -> None:
    r"""Reset the "peak" stats tracked by the host memory allocator.

    See :func:`~torch.cuda.host_memory_stats` for details. Peak stats correspond to the
    `"peak"` key in each individual stat dict.
    """
    # Host-side allocator stats are global, so there is no device argument.
    return torch._C._cuda_resetPeakHostMemoryStats()
def reset_max_memory_allocated(device: "Device" = None) -> None:
    r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device.

    See :func:`~torch.cuda.max_memory_allocated` for details.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. warning::
        This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
        /all/ peak memory stats.

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    message = (
        "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
        "which resets /all/ peak memory stats."
    )
    warnings.warn(message, FutureWarning, stacklevel=2)
    return reset_peak_memory_stats(device=device)
def reset_max_memory_cached(device: "Device" = None) -> None:
    r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.

    See :func:`~torch.cuda.max_memory_cached` for details.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. warning::
        This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
        /all/ peak memory stats.

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    message = (
        "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
        "which resets /all/ peak memory stats."
    )
    warnings.warn(message, FutureWarning, stacklevel=2)
    return reset_peak_memory_stats(device=device)
def memory_allocated(device: "Device" = None) -> int:
    r"""Return the current GPU memory occupied by tensors in bytes for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        This is likely less than the amount shown in `nvidia-smi` since some
        unused memory can be held by the caching allocator and some context
        needs to be created on GPU. See :ref:`cuda-memory-management` for more
        details about GPU memory management.
    """
    # Missing key (e.g. CUDA never initialized) reports zero bytes.
    stats = memory_stats(device=device)
    return stats.get("allocated_bytes.all.current", 0)
def max_memory_allocated(device: "Device" = None) -> int:
    r"""Return the maximum GPU memory occupied by tensors in bytes for a given device.

    By default, this returns the peak allocated memory since the beginning of
    this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to
    reset the starting point in tracking this metric. For example, these two
    functions can measure the peak allocated memory usage of each iteration in a
    training loop.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    # Missing key (e.g. CUDA never initialized) reports zero bytes.
    stats = memory_stats(device=device)
    return stats.get("allocated_bytes.all.peak", 0)
def memory_reserved(device: "Device" = None) -> int:
    r"""Return the current GPU memory managed by the caching allocator in bytes for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    # Missing key (e.g. CUDA never initialized) reports zero bytes.
    stats = memory_stats(device=device)
    return stats.get("reserved_bytes.all.current", 0)
def max_memory_reserved(device: "Device" = None) -> int:
    r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device.

    By default, this returns the peak cached memory since the beginning of this
    program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset
    the starting point in tracking this metric. For example, these two functions
    can measure the peak cached memory amount of each iteration in a training
    loop.

    Args:
        device (torch.device or int, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    # Missing key (e.g. CUDA never initialized) reports zero bytes.
    stats = memory_stats(device=device)
    return stats.get("reserved_bytes.all.peak", 0)
@deprecated(
    "`torch.cuda.memory_cached` has been renamed to `torch.cuda.memory_reserved`",
    category=FutureWarning,
)
def memory_cached(device: "Device" = None) -> int:
    r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
    # Backward-compatible alias; the decorator is configured with FutureWarning.
    return memory_reserved(device=device)
@deprecated(
    "`torch.cuda.max_memory_cached` has been renamed to `torch.cuda.max_memory_reserved`",
    category=FutureWarning,
)
def max_memory_cached(device: "Device" = None) -> int:
    r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
    # Backward-compatible alias; the decorator is configured with FutureWarning.
    return max_memory_reserved(device=device)
def memory_snapshot(mempool_id=None):
    r"""Return a snapshot of the CUDA memory allocator state across all devices.

    Interpreting the output of this function requires familiarity with the
    memory allocator internals.

    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    # The full snapshot also carries device traces; only segments are exposed here.
    snapshot = torch._C._cuda_memorySnapshot(mempool_id)
    return snapshot["segments"]
def memory_summary(device: "Device" = None, abbreviated: bool = False) -> str:
    r"""Return a human-readable printout of the current memory allocator statistics for a given device.
    This can be useful to display periodically during training, or when
    handling out-of-memory exceptions.
    Args:
        device (torch.device or int, optional): selected device. Returns
            printout for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).
        abbreviated (bool, optional): whether to return an abbreviated summary
            (default: False).
    .. note::
        See :ref:`cuda-memory-management` for more details about GPU memory
        management.
    """
    device = _get_device_index(device, optional=True)
    stats = memory_stats(device=device)
    def _format_size(sz, pref_sz):
        # Choose a binary prefix based on `pref_sz` (the reference value of
        # the row group) so that all entries in the group share one unit.
        prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_sz < 768 * 1024:
                break
            prefix = new_prefix
            sz //= 1024
            pref_sz /= 1024
        return f"{sz:6d} {prefix}"
    def _format_count(cnt, pref_cnt):
        # Same idea as _format_size, but with decimal (K/M) prefixes.
        prefixes = [" ", "K", "M"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_cnt < 750 * 1000:
                break
            prefix = new_prefix
            cnt //= 1000
            pref_cnt /= 1000
        return f"{cnt:7d} {prefix} "
    # (stat key prefix in `stats`, row label, value formatter)
    metrics_to_display = [
        ("allocated_bytes", "Allocated memory", _format_size),
        ("active_bytes", "Active memory", _format_size),
        ("requested_bytes", "Requested memory", _format_size),
        ("reserved_bytes", "GPU reserved memory", _format_size),
        ("inactive_split_bytes", "Non-releasable memory", _format_size),
        ("allocation", "Allocations", _format_count),
        ("active", "Active allocs", _format_count),
        ("segment", "GPU reserved segments", _format_count),
        ("inactive_split", "Non-releasable allocs", _format_count),
    ]
    # Header. The `{...}` placeholders are filled by str.format at the end.
    lines = []
    lines.append("=" * 75)
    lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ")
    lines.append("-" * 75)
    lines.append(
        " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} "
    )
    lines.append("=" * 75)
    lines.append(
        "        Metric         | Cur Usage  | Peak Usage | Tot Alloc  | Tot Freed  "
    )
    for metric_key, metric_name, formatter in metrics_to_display:
        lines.append("-" * 75)
        submetrics = [("all", metric_name)]
        if not abbreviated:
            submetrics.append(("large_pool", "      from large pool"))
            submetrics.append(("small_pool", "      from small pool"))
        # The "all" row's values (seen first) fix the unit prefix used for
        # the per-pool rows, so a group prints in a consistent unit.
        current_prefval, peak_prefval, allocated_prefval, freed_prefval = (
            None,
            None,
            None,
            None,
        )
        for submetric_key, submetric_name in submetrics:
            prefix = metric_key + "." + submetric_key + "."
            current = stats[prefix + "current"]
            peak = stats[prefix + "peak"]
            allocated = stats[prefix + "allocated"]
            freed = stats[prefix + "freed"]
            if current_prefval is None:
                current_prefval = current
                peak_prefval = peak
                allocated_prefval = allocated
                freed_prefval = freed
            lines.append(
                f" {submetric_name:<21} | {formatter(current, current_prefval)} | {formatter(peak, peak_prefval)} | "
                f"{formatter(allocated, allocated_prefval)} | {formatter(freed, freed_prefval)} ",
            )
    # Oversize stats have no per-pool split; each value scales against itself.
    metrics_to_display = [
        ("oversize_allocations", "Oversize allocations", _format_count),
        ("oversize_segments", "Oversize GPU segments", _format_count),
    ]
    for metric_key, metric_name, formatter in metrics_to_display:
        lines.append("-" * 75)
        prefix = metric_key + "."
        current = stats[prefix + "current"]
        peak = stats[prefix + "peak"]
        allocated = stats[prefix + "allocated"]
        freed = stats[prefix + "freed"]
        lines.append(
            f" {metric_name:<21} | {formatter(current, current)} | {formatter(peak, peak)} | "
            f"{formatter(allocated, allocated)} | {formatter(freed, freed)} ",
        )
    lines.append("=" * 75)
    # str.format field names may not contain ".", so stat keys are rewritten
    # with "-" before substitution into the header placeholders.
    fmt_dict = {"_": "", "device": device}
    for k, v in stats.items():
        fmt_dict[k.replace(".", "-")] = v
    return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
def list_gpu_processes(device: "Device" = None) -> str:
    r"""Return a human-readable printout of the running processes and their GPU memory use for a given device.
    This can be useful to display periodically during training, or when
    handling out-of-memory exceptions.
    Args:
        device (torch.device or int, optional): selected device. Returns
            printout for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default).
    Returns:
        A multi-line string; on failure to load the vendor SMI library this
        is a single diagnostic message rather than an exception.
    """
    if not torch.version.hip:
        # CUDA build: enumerate compute processes via NVML (nvidia-ml-py).
        try:
            import pynvml  # type: ignore[import]
        except ModuleNotFoundError:
            return "pynvml module not found, please install nvidia-ml-py"
        # pyrefly: ignore [import-error,missing-module-attribute]
        from pynvml import NVMLError_DriverNotLoaded
        try:
            pynvml.nvmlInit()
        except NVMLError_DriverNotLoaded:
            return "cuda driver can't be loaded, is cuda enabled?"
        device = _get_nvml_device_index(device)
        handle = pynvml.nvmlDeviceGetHandleByIndex(device)
        procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
    else:
        # ROCm build: enumerate processes via amdsmi instead.
        try:
            import amdsmi  # type: ignore[import]
        except ModuleNotFoundError:
            return "amdsmi module not found, please install amdsmi"
        try:
            amdsmi.amdsmi_init()  # type: ignore[attr-defined]
        except amdsmi.AmdSmiException:  # type: ignore[attr-defined]
            return "amdsmi driver can't be loaded, is ROCm installed?"
        device = _get_amdsmi_device_index(device)
        try:
            handle = amdsmi.amdsmi_get_processor_handles()[device]  # type: ignore[attr-defined]
            procs = amdsmi.amdsmi_get_gpu_process_list(handle)  # type: ignore[attr-defined]
        except amdsmi.AmdSmiException:  # type: ignore[attr-defined]
            return "amdsmi cannot list processes from other users"
    lines = []
    lines.append(f"GPU:{device}")
    if len(procs) == 0:
        lines.append("no processes are running")
    for p in procs:
        if not torch.version.hip:
            # NVML reports memory in bytes; convert to MiB.
            mem = p.usedGpuMemory / (1024 * 1024)
            pid = p.pid
        else:
            try:
                proc_info = amdsmi.amdsmi_get_gpu_process_info(handle, p)  # type: ignore[possibly-undefined]
            except AttributeError:
                # https://github.com/ROCm/amdsmi/commit/c551c3caedbd903ba828e7fdffa5b56d475a15e7
                # is a BC-breaking change that removes amdsmi_get_gpu_process_info API from amdsmi
                proc_info = p
            mem = proc_info["memory_usage"]["vram_mem"] / (1024 * 1024)
            pid = proc_info["pid"]
        lines.append(f"process {pid:>10d} uses {mem:>12.3f} MB GPU memory")
    return "\n".join(lines)
def mem_get_info(device: "Device" = None) -> tuple[int, int]:
    r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo.
    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.cuda.current_device`,
            if :attr:`device` is ``None`` (default) or if the device index is not specified.
    Returns:
        A ``(free, total)`` pair of byte counts, as reported by the CUDA runtime.
    .. note::
        See :ref:`cuda-memory-management` for more
        details about GPU memory management.
    """
    target = torch.cuda.current_device() if device is None else device
    # optional=True allows `device = torch.device('cuda')` for which device.index is None
    idx = _get_device_index(target, optional=True)
    return torch.cuda.cudart().cudaMemGetInfo(idx)
def _record_memory_history_legacy(
    enabled: bool,
    record_context=True,
    trace_alloc_max_entries=1,
    trace_alloc_record_context=False,
    device: "Device" = None,
    record_context_cpp=False,
    clear_history=False,
    compile_context=False,
    global_record_annotations=False,
):
    # Thin wrapper over the C++ binding for the old boolean-based API.
    # NOTE(review): `device` is accepted here but never forwarded to the
    # binding, so recording appears to be global -- confirm intent.
    _C._cuda_record_memory_history_legacy(  # type: ignore[call-arg]
        enabled,
        record_context,
        # pyrefly: ignore [bad-argument-type]
        trace_alloc_max_entries,
        trace_alloc_record_context,
        record_context_cpp,
        clear_history,
        compile_context,
        global_record_annotations,
    )
def _record_memory_history(
    enabled: Optional[Literal["state", "all"]] = "all", *args, **kwargs
) -> None:
    """Enable recording of stack traces associated with memory
    allocations, so you can tell what allocated any piece of memory in
    :func:`torch.cuda.memory._snapshot()`.
    In addition to keeping stack traces with each current allocation and free,
    this will also enable recording of a history of all alloc/free events.
    Use :func:`torch.cuda.memory._snapshot()` to retrieve this information,
    and the tools in `_memory_viz.py` to visualize snapshots.
    Buffer behavior
    ---------------
    This will store up to `max_entries` instances of `TraceEntry` when enabled.
    Python trace collection defaults to `sys.maxsize`, meaning long-running
    or indefinitely running jobs should set a reasonable limit to avoid excessive
    memory use. Expect each entry to be several KB.
    Longer running workflows or those with smaller `max_entries` values will only
    store the last accumulated `max_entries` entries, meaning new entries overwrite
    older entries.
    C++ implementation for reference to ring buffer implementation:
    .. code-block:: cpp
        if (record_history) {
            if (alloc_trace->size() < alloc_trace_max_entries_) {
                alloc_trace->emplace_back(te);
            } else {
                (*alloc_trace)[alloc_trace_next++] = te;
                if (alloc_trace_next == alloc_trace_max_entries_) {
                    alloc_trace_next = 0;
                }
            }
        }
    Latency impact
    --------------
    The Python trace collection is fast (2us per trace), so you may consider
    enabling this on production jobs if you anticipate ever having to debug
    memory issues.
    C++ trace collection is also fast (~50ns/frame), which for many typical programs
    works out to ~2us per trace, but can vary depending on stack depth.
    Args:
        enabled (Literal[None, "state", "all"], optional):
            `None`, disable recording memory history.
            `"state"`, keep information for currently allocated memory.
            `"all"`, additionally keep a history of all alloc/free calls.
            Defaults to "all".
        context (Literal[None, "state", "alloc", "all"], optional):
            `None`, Do not record any tracebacks.
            `"state"`, Record tracebacks for currently allocated memory.
            `"alloc"`, additionally keep tracebacks for alloc calls.
            `"all"`, additionally keep tracebacks for free calls.
            Defaults to "all".
        stacks (Literal["python", "all"], optional):
            `"python"`, include Python, TorchScript, and inductor frames in tracebacks
            `"all"`, additionally include C++ frames
            Defaults to "all".
        max_entries (int, optional): Keep a maximum of `max_entries`
            alloc/free events in the recorded history recorded.
    """
    # A bool `enabled` means the caller is using the pre-"state"/"all"
    # API surface; route to the legacy binding for backwards compatibility.
    if isinstance(enabled, bool):
        return _record_memory_history_legacy(enabled, *args, **kwargs)
    else:
        return _record_memory_history_impl(enabled, *args, **kwargs)
def _record_memory_history_impl(
    enabled: Optional[str] = "all",
    context: Optional[str] = "all",
    stacks: str = "all",
    max_entries: int = sys.maxsize,
    device: "Device" = None,
    clear_history: bool = False,
    compile_context: bool = False,
    global_record_annotations: bool = False,
):
    # Implementation behind _record_memory_history for the string-based API.
    # NOTE(review): `device` is accepted but not forwarded to the binding --
    # recording appears to be global; confirm intent.
    _C._cuda_record_memory_history(  # type: ignore[call-arg]
        enabled,
        context,
        stacks,
        max_entries,
        clear_history,
        compile_context,
        global_record_annotations,
    )
# Expose the rich implementation signature on the public dispatcher so that
# help()/inspect show the string-based parameters instead of *args/**kwargs.
_record_memory_history.__signature__ = signature(_record_memory_history_impl)  # type: ignore[attr-defined]
def _augment_frames(frames: list[_Frame]) -> int:
"""
Augment a list of frames with FX debug information.
Args:
frames: List of frame dictionaries to augment
Returns:
The count of frames that were augmented.
"""
from torch.fx.graph_module import FX_GRAPH_MODULE_FILE_PREFIX
# Regex pattern to match FX generated files
_FX_GENERATED_PATTERN = re.compile(
rf"{re.escape(FX_GRAPH_MODULE_FILE_PREFIX)}.*\.py$"
)
count = 0
if not frames:
return count
for frame in frames:
if "filename" in frame and "line" in frame:
filename = frame["filename"]
lineno = frame["line"]
# Check if this looks like an FX generated file
if not _FX_GENERATED_PATTERN.search(os.path.basename(filename)):
continue
# Look up metadata from the global registry
from torch.fx.traceback import _FX_METADATA_REGISTRY
metadata = _FX_METADATA_REGISTRY.get(filename)
if metadata is None:
continue
lineno_map = metadata.get("lineno_map", {})
node_metadata = metadata.get("node_metadata", {})
prologue_start = metadata.get("prologue_start", 0)
# Get the node index for this line
node_idx = lineno_map.get(lineno - prologue_start)
if node_idx is not None and node_idx in node_metadata:
node_info = node_metadata[node_idx]
original_trace = node_info.get("stack_trace")
node_op = node_info.get("op")
node_name = node_info.get("name")
node_target = node_info.get("target")
# Always add node metadata
frame["fx_node_op"] = node_op
frame["fx_node_name"] = node_name
frame["fx_node_target"] = str(node_target)
# Add original trace if available
if original_trace:
frame["fx_original_trace"] = original_trace
count += 1
return count
def _augment_memory_snapshot_stack_traces(
    snapshot: str | _Snapshot,
) -> _Snapshot:
    """
    Attach original-source FX debug information to every stack frame of a
    memory snapshot.

    IMPORTANT: frame augmentation reads a global in-memory registry
    (_FX_METADATA_REGISTRY) that is populated while FX graphs are compiled,
    so this must run in the same Python process that compiled them. It cannot
    augment snapshots loaded from disk in a different process.

    Args:
        snapshot: Either a memory snapshot dict or a path to a pickled one.

    Returns:
        The (mutated) snapshot dictionary, with fx_node_op, fx_node_name,
        fx_original_trace, and fx_node_info fields added to frames.
    """
    if isinstance(snapshot, str):
        # A path was supplied: unpickle the snapshot from disk first.
        with open(snapshot, "rb") as fh:
            result = cast(_Snapshot, pickle.load(fh))
    else:
        result = snapshot

    total_augmented = 0

    # Frames attached to blocks inside cached segments (live allocations).
    if "segments" in result:
        for seg in result["segments"]:
            if "blocks" in seg:
                for blk in seg["blocks"]:
                    if "frames" in blk:
                        total_augmented += _augment_frames(blk["frames"])

    # Frames attached to per-device alloc/free history entries.
    if "device_traces" in result:
        for per_device in result["device_traces"]:
            for entry in per_device:
                if isinstance(entry, dict) and "frames" in entry:
                    total_augmented += _augment_frames(entry["frames"])

    return result
def _snapshot(device: "Device" = None, augment_with_fx_traces=False):
    """Save a snapshot of CUDA memory state at the time it was called.
    The state is represented as a dictionary with the following structure.
    .. code-block:: python
        class Snapshot(TypedDict):
            segments: List[Segment]
            device_traces: List[List[TraceEntry]]
        class Segment(TypedDict):
            # Segments are memory returned from a cudaMalloc call.
            # The size of reserved memory is the sum of all Segments.
            # Segments are cached and reused for future allocations.
            # If the reuse is smaller than the segment, the segment
            # is split into more than one Block.
            # empty_cache() frees Segments that are entirely inactive.
            address: int
            total_size: int  # cudaMalloc'd size of segment
            stream: int
            segment_type: Literal["small", "large"]  # 'large' (>1MB)
            allocated_size: int  # size of memory in use
            active_size: int  # size of memory in use or in active_awaiting_free state
            blocks: List[Block]
        class Block(TypedDict):
            # A piece of memory returned from the allocator, or
            # currently cached but inactive.
            size: int
            requested_size: int  # size requested during malloc, may be smaller than
            # size due to rounding
            address: int
            state: Literal[
                "active_allocated",  # used by a tensor
                "active_awaiting_free",  # waiting for another stream to finish using
                # this, then it will become free
                "inactive",
            ]  # free for reuse
            frames: List[Frame]  # stack trace from where the allocation occurred
        class Frame(TypedDict):
            filename: str
            line: int
            name: str
            # Optional FX debug fields (present when augment_with_fx_traces=True
            # and the frame corresponds to FX-generated code)
            fx_node_op: str  # FX node operation type (e.g., 'call_function', 'output')
            fx_node_name: str  # FX node name (e.g., 'linear', 'relu_1')
            fx_original_trace: str  # Original model source code stack trace
        class TraceEntry(TypedDict):
            # When `torch.cuda.memory._record_memory_history()` is enabled,
            # the snapshot will contain TraceEntry objects that record each
            # action the allocator took.
            action: Literal[
                "alloc"  # memory allocated
                "free_requested",  # the allocator received a call to free memory
                "free_completed",  # the memory that was requested to be freed is now
                # able to be used in future allocation calls
                "segment_alloc",  # the caching allocator asks cudaMalloc for more memory
                # and added it as a segment in its cache
                "segment_free",  # the caching allocator called cudaFree to return memory
                # to cuda possibly trying free up memory to
                # allocate more segments or because empty_caches was called
                "oom",  # the allocator threw an OOM exception. 'size' is
                # the requested number of bytes that did not succeed
                "snapshot",  # the allocator generated a memory snapshot
                # useful to correlate a previously taken
                # snapshot with this trace
            ]
            addr: int  # not present for OOM
            frames: List[Frame]
            size: int
            stream: int
            device_free: int  # only present for OOM, the amount of
            # memory cuda still reports to be free
    Args:
        device: Device to capture snapshot for. If None, captures for current device.
        augment_with_fx_traces: If True, augment stack trace frames with FX debug information
                                that maps generated FX code back to original model source code.
                                This adds fx_node_op, fx_node_name, fx_original_trace, and
                                fx_node_info fields to Frame objects. Default: False.
    Returns:
        The Snapshot dictionary object
    """
    s = _C._cuda_memorySnapshot(None)
    if augment_with_fx_traces:
        # Augmentation only works in the process that compiled the FX graphs;
        # see _augment_memory_snapshot_stack_traces.
        s = _augment_memory_snapshot_stack_traces(s)  # type: ignore[assignment, arg-type]
    return s
def _dump_snapshot(filename="dump_snapshot.pickle", augment_with_fx_traces=False):
    """
    Save a pickled version of the `torch.memory._snapshot()` dictionary to a file.
    This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz
    Snapshot file sizes scale with `max_entries` and stack trace depth per entry,
    with several KB per entry. These can easily be in the GB range for longer running
    workflows with large `max_entries`.
    Args:
        filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle".
        augment_with_fx_traces (bool, optional): If True, augment the snapshot with FX debug information
                                                 before dumping. This maps generated FX code stack traces
                                                 back to original model source code. Defaults to False.
    """
    s = _snapshot(augment_with_fx_traces=augment_with_fx_traces)
    with open(filename, "wb") as f:
        pickle.dump(s, f)
def _set_memory_metadata(metadata: str):
    """
    Set custom metadata that will be attached to all subsequent CUDA memory allocations.
    This metadata will be recorded in the memory snapshot for all allocations made
    after this call until the metadata is cleared or changed.
    Args:
        metadata (str): Custom metadata string to attach to allocations.
            Pass an empty string to clear the metadata.
    """
    # NOTE(review): the binding takes no device argument, so this appears to
    # apply process-wide -- confirm.
    # pyrefly: ignore [missing-attribute]
    torch._C._cuda_setMemoryMetadata(metadata)
def _get_memory_metadata() -> str:
    """
    Get the current custom metadata that is being attached to CUDA memory allocations.
    Returns:
        str: The current metadata string, or empty string if no metadata is set.
    """
    # Read-only counterpart of _set_memory_metadata.
    # pyrefly: ignore [missing-attribute]
    return torch._C._cuda_getMemoryMetadata()
def _save_segment_usage(filename="output.svg", snapshot=None):
    """Write an SVG visualization of per-segment usage to *filename*.
    Uses the current allocator state when *snapshot* is None.
    """
    if snapshot is None:
        snapshot = _snapshot()
    with open(filename, "w") as f:
        f.write(_segments(snapshot))
def _save_memory_usage(filename="output.svg", snapshot=None):
    """Write an SVG visualization of memory usage to *filename*.
    Uses the current allocator state when *snapshot* is None.
    """
    if snapshot is None:
        snapshot = _snapshot()
    with open(filename, "w") as f:
        f.write(_memory(snapshot))
@deprecated(
    "torch.cuda._set_allocator_settings is deprecated. Use torch._C._accelerator_setAllocatorSettings instead.",
    category=FutureWarning,
)
def _set_allocator_settings(env: str):
    """Deprecated forwarding shim to the accelerator-generic settings setter."""
    # pyrefly: ignore [missing-attribute]
    return torch._C._accelerator_setAllocatorSettings(env)
def get_allocator_backend() -> str:
    r"""Return a string describing the active allocator backend as set by
    ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are
    ``native`` (PyTorch's native caching allocator) and ``cudaMallocAsync``
    (CUDA's built-in asynchronous allocator).
    .. note::
        See :ref:`cuda-memory-management` for details on choosing the allocator backend.
    """
    return torch._C._cuda_getAllocatorBackend()
| _Snapshot |
python | scipy__scipy | scipy/optimize/_optimize.py | {
"start": 140160,
"end": 149745
} | class ____:
"""
Object to wrap user cost function for optimize.brute, allowing picklability
"""
    def __init__(self, f, args):
        # Keep the wrapped cost function and its extra positional arguments;
        # None normalizes to an empty argument list.
        self.f = f
        self.args = [] if args is None else args
    def __call__(self, x):
        # flatten needed for one dimensional case.
        return self.f(np.asarray(x).flatten(), *self.args)
def show_options(solver=None, method=None, disp=True):
    """
    Show documentation for additional options of optimization solvers.
    These are method-specific options that can be supplied through the
    ``options`` dict.
    Parameters
    ----------
    solver : str
        Type of optimization solver. One of 'minimize', 'minimize_scalar',
        'root', 'root_scalar', 'linprog', or 'quadratic_assignment'.
    method : str, optional
        If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        correspond to methods' names of respective solver (e.g., 'BFGS' for
        'minimize').
    disp : bool, optional
        Whether to print the result rather than returning it.
    Returns
    -------
    text
        Either None (for disp=True) or the text string (disp=False)
    Notes
    -----
    The solver-specific methods are:
    `scipy.optimize.minimize`
    - :ref:`Nelder-Mead <optimize.minimize-neldermead>`
    - :ref:`Powell <optimize.minimize-powell>`
    - :ref:`CG <optimize.minimize-cg>`
    - :ref:`BFGS <optimize.minimize-bfgs>`
    - :ref:`Newton-CG <optimize.minimize-newtoncg>`
    - :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
    - :ref:`TNC <optimize.minimize-tnc>`
    - :ref:`COBYLA <optimize.minimize-cobyla>`
    - :ref:`COBYQA <optimize.minimize-cobyqa>`
    - :ref:`SLSQP <optimize.minimize-slsqp>`
    - :ref:`dogleg <optimize.minimize-dogleg>`
    - :ref:`trust-ncg <optimize.minimize-trustncg>`
    `scipy.optimize.root`
    - :ref:`hybr <optimize.root-hybr>`
    - :ref:`lm <optimize.root-lm>`
    - :ref:`broyden1 <optimize.root-broyden1>`
    - :ref:`broyden2 <optimize.root-broyden2>`
    - :ref:`anderson <optimize.root-anderson>`
    - :ref:`linearmixing <optimize.root-linearmixing>`
    - :ref:`diagbroyden <optimize.root-diagbroyden>`
    - :ref:`excitingmixing <optimize.root-excitingmixing>`
    - :ref:`krylov <optimize.root-krylov>`
    - :ref:`df-sane <optimize.root-dfsane>`
    `scipy.optimize.minimize_scalar`
    - :ref:`brent <optimize.minimize_scalar-brent>`
    - :ref:`golden <optimize.minimize_scalar-golden>`
    - :ref:`bounded <optimize.minimize_scalar-bounded>`
    `scipy.optimize.root_scalar`
    - :ref:`bisect <optimize.root_scalar-bisect>`
    - :ref:`brentq <optimize.root_scalar-brentq>`
    - :ref:`brenth <optimize.root_scalar-brenth>`
    - :ref:`ridder <optimize.root_scalar-ridder>`
    - :ref:`toms748 <optimize.root_scalar-toms748>`
    - :ref:`newton <optimize.root_scalar-newton>`
    - :ref:`secant <optimize.root_scalar-secant>`
    - :ref:`halley <optimize.root_scalar-halley>`
    `scipy.optimize.linprog`
    - :ref:`simplex <optimize.linprog-simplex>`
    - :ref:`interior-point <optimize.linprog-interior-point>`
    - :ref:`revised simplex <optimize.linprog-revised_simplex>`
    - :ref:`highs <optimize.linprog-highs>`
    - :ref:`highs-ds <optimize.linprog-highs-ds>`
    - :ref:`highs-ipm <optimize.linprog-highs-ipm>`
    `scipy.optimize.quadratic_assignment`
    - :ref:`faq <optimize.qap-faq>`
    - :ref:`2opt <optimize.qap-2opt>`
    Examples
    --------
    We can print documentations of a solver in stdout:
    >>> from scipy.optimize import show_options
    >>> show_options(solver="minimize")
    ...
    Specifying a method is possible:
    >>> show_options(solver="minimize", method="Nelder-Mead")
    ...
    We can also get the documentations as a string:
    >>> show_options(solver="minimize", method="Nelder-Mead", disp=False)
    Minimization of scalar function of one or more variables using the ...
    """
    import textwrap
    # Mapping: solver name -> tuple of (method name, dotted path to the
    # function or doc object whose __doc__ describes that method's options).
    doc_routines = {
        'minimize': (
            ('bfgs', 'scipy.optimize._optimize._minimize_bfgs'),
            ('cg', 'scipy.optimize._optimize._minimize_cg'),
            ('cobyla', 'scipy.optimize._cobyla_py._minimize_cobyla'),
            ('cobyqa', 'scipy.optimize._cobyqa_py._minimize_cobyqa'),
            ('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
            ('l-bfgs-b', 'scipy.optimize._lbfgsb_py._minimize_lbfgsb'),
            ('nelder-mead', 'scipy.optimize._optimize._minimize_neldermead'),
            ('newton-cg', 'scipy.optimize._optimize._minimize_newtoncg'),
            ('powell', 'scipy.optimize._optimize._minimize_powell'),
            ('slsqp', 'scipy.optimize._slsqp_py._minimize_slsqp'),
            ('tnc', 'scipy.optimize._tnc._minimize_tnc'),
            ('trust-ncg',
             'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
            ('trust-constr',
             'scipy.optimize._trustregion_constr.'
             '_minimize_trustregion_constr'),
            ('trust-exact',
             'scipy.optimize._trustregion_exact._minimize_trustregion_exact'),
            ('trust-krylov',
             'scipy.optimize._trustregion_krylov._minimize_trust_krylov'),
        ),
        'root': (
            ('hybr', 'scipy.optimize._minpack_py._root_hybr'),
            ('lm', 'scipy.optimize._root._root_leastsq'),
            ('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
            ('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
            ('anderson', 'scipy.optimize._root._root_anderson_doc'),
            ('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
            ('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
            ('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
            ('krylov', 'scipy.optimize._root._root_krylov_doc'),
            ('df-sane', 'scipy.optimize._spectral._root_df_sane'),
        ),
        'root_scalar': (
            ('bisect', 'scipy.optimize._root_scalar._root_scalar_bisect_doc'),
            ('brentq', 'scipy.optimize._root_scalar._root_scalar_brentq_doc'),
            ('brenth', 'scipy.optimize._root_scalar._root_scalar_brenth_doc'),
            ('ridder', 'scipy.optimize._root_scalar._root_scalar_ridder_doc'),
            ('toms748', 'scipy.optimize._root_scalar._root_scalar_toms748_doc'),
            ('secant', 'scipy.optimize._root_scalar._root_scalar_secant_doc'),
            ('newton', 'scipy.optimize._root_scalar._root_scalar_newton_doc'),
            ('halley', 'scipy.optimize._root_scalar._root_scalar_halley_doc'),
        ),
        'linprog': (
            ('simplex', 'scipy.optimize._linprog._linprog_simplex_doc'),
            ('interior-point', 'scipy.optimize._linprog._linprog_ip_doc'),
            ('revised simplex', 'scipy.optimize._linprog._linprog_rs_doc'),
            ('highs-ipm', 'scipy.optimize._linprog._linprog_highs_ipm_doc'),
            ('highs-ds', 'scipy.optimize._linprog._linprog_highs_ds_doc'),
            ('highs', 'scipy.optimize._linprog._linprog_highs_doc'),
        ),
        'quadratic_assignment': (
            ('faq', 'scipy.optimize._qap._quadratic_assignment_faq'),
            ('2opt', 'scipy.optimize._qap._quadratic_assignment_2opt'),
        ),
        'minimize_scalar': (
            ('brent', 'scipy.optimize._optimize._minimize_scalar_brent'),
            ('bounded', 'scipy.optimize._optimize._minimize_scalar_bounded'),
            ('golden', 'scipy.optimize._optimize._minimize_scalar_golden'),
        ),
    }
    if solver is None:
        # No solver given: build one combined document by recursing per solver.
        # NOTE(review): 'root_scalar' and 'quadratic_assignment' are omitted
        # from this combined output even though the docstring lists them as
        # supported solvers -- confirm whether that is intentional.
        text = ["\n\n\n========\n", "minimize\n", "========\n"]
        text.append(show_options('minimize', disp=False))
        text.extend(["\n\n===============\n", "minimize_scalar\n",
                     "===============\n"])
        text.append(show_options('minimize_scalar', disp=False))
        text.extend(["\n\n\n====\n", "root\n",
                     "====\n"])
        text.append(show_options('root', disp=False))
        text.extend(['\n\n\n=======\n', 'linprog\n',
                     '=======\n'])
        text.append(show_options('linprog', disp=False))
        text = "".join(text)
    else:
        solver = solver.lower()
        if solver not in doc_routines:
            raise ValueError(f'Unknown solver {solver!r}')
        if method is None:
            # No method given: concatenate the docs of every method.
            text = []
            for name, _ in doc_routines[solver]:
                text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
                text.append(show_options(solver, name, disp=False))
            text = "".join(text)
        else:
            method = method.lower()
            methods = dict(doc_routines[solver])
            if method not in methods:
                raise ValueError(f"Unknown method {method!r}")
            name = methods[method]
            # Import function object
            parts = name.split('.')
            mod_name = ".".join(parts[:-1])
            __import__(mod_name)
            obj = getattr(sys.modules[mod_name], parts[-1])
            # Get doc
            doc = obj.__doc__
            if doc is not None:
                text = textwrap.dedent(doc).strip()
            else:
                text = ""
    if disp:
        print(text)
        return
    else:
        return text
| _Brute_Wrapper |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 99643,
"end": 102518
} | class ____(PlotTestCase):
    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        # Two random 4-D arrays on a 3x3 x/y grid with 4x4 row/col facet dims.
        das = [
            DataArray(
                np.random.randn(3, 3, 4, 4),
                dims=["x", "y", "row", "col"],
                coords=[range(k) for k in [3, 3, 4, 4]],
            )
            for _ in [1, 2]
        ]
        ds = Dataset({"u": das[0], "v": das[1]})
        # Units on every coord/variable so the tests can assert on labels.
        ds.x.attrs["units"] = "xunits"
        ds.y.attrs["units"] = "yunits"
        ds.col.attrs["units"] = "colunits"
        ds.row.attrs["units"] = "rowunits"
        ds.u.attrs["units"] = "uunits"
        ds.v.attrs["units"] = "vunits"
        # Vector magnitude, used as the `hue` variable in the tests below.
        ds["mag"] = np.hypot(ds.u, ds.v)
        self.ds = ds
    def test_quiver(self) -> None:
        with figure_context():
            # Basic quiver plot returns a matplotlib Quiver handle.
            hdl = self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u", v="v")
            assert isinstance(hdl, mpl.quiver.Quiver)
            # Omitting `v` is rejected with a helpful message.
            with pytest.raises(ValueError, match=r"specify x, y, u, v"):
                self.ds.isel(row=0, col=0).plot.quiver(x="x", y="y", u="u")
            # Discrete hue styling is not supported for quiver plots.
            with pytest.raises(ValueError, match=r"hue_style"):
                self.ds.isel(row=0, col=0).plot.quiver(
                    x="x", y="y", u="u", v="v", hue="mag", hue_style="discrete"
                )
    def test_facetgrid(self) -> None:
        with figure_context():
            # Faceted quiver: every facet's mappable is a Quiver, and a
            # quiverkey labeled with u's units is added by default.
            fg = self.ds.plot.quiver(
                x="x", y="y", u="u", v="v", row="row", col="col", scale=1, hue="mag"
            )
            for handle in fg._mappables:
                assert isinstance(handle, mpl.quiver.Quiver)
            assert fg.quiverkey is not None
            assert "uunits" in fg.quiverkey.text.get_text()
        with figure_context():
            # add_guide=False suppresses the quiverkey.
            fg = self.ds.plot.quiver(
                x="x",
                y="y",
                u="u",
                v="v",
                row="row",
                col="col",
                scale=1,
                hue="mag",
                add_guide=False,
            )
            assert fg.quiverkey is None
        # Faceted quiver requires an explicit scale.
        with pytest.raises(ValueError, match=r"Please provide scale"):
            self.ds.plot.quiver(x="x", y="y", u="u", v="v", row="row", col="col")
    @pytest.mark.parametrize(
        "add_guide, hue_style, legend, colorbar",
        [
            (None, None, False, True),
            (False, None, False, False),
            (True, None, False, True),
            (True, "continuous", False, True),
        ],
    )
    def test_add_guide(self, add_guide, hue_style, legend, colorbar) -> None:
        # With a continuous hue, quiver should request a colorbar (unless
        # guides are explicitly disabled) and never a legend.
        meta_data = _infer_meta_data(
            self.ds,
            x="x",
            y="y",
            hue="mag",
            hue_style=hue_style,
            add_guide=add_guide,
            funcname="quiver",
        )
        assert meta_data["add_legend"] is legend
        assert meta_data["add_colorbar"] is colorbar
@requires_matplotlib
| TestDatasetQuiverPlots |
python | coleifer__peewee | tests/pool.py | {
"start": 941,
"end": 1394
} | class ____(SqliteDatabase):
    def __init__(self, *args, **kwargs):
        # `counter` seeds both the number of connections opened and closed,
        # so tests can start the bookkeeping from an arbitrary value.
        self.counter = self.closed_counter = kwargs.pop('counter', 0)
        self.transaction_history = []
        super(FakeDatabase, self).__init__(*args, **kwargs)
    def _connect(self):
        # Each "connection" is simply the next integer, letting tests assert
        # on how many connections were opened and which one is in use.
        self.counter += 1
        return self.counter
    def _close(self, conn):
        # The integer "connection" needs no real teardown; just count closes.
        self.closed_counter += 1
    def transaction(self):
        # Return a stub transaction (FakeTransaction is defined elsewhere in
        # this test module) instead of a real database transaction.
        return FakeTransaction(self)
| FakeDatabase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classVar3.py | {
"start": 398,
"end": 1582
} | class ____(Generic[T]):
x: ClassVar[int] = 3
# This should generate an error.
y: Final[ClassVar[int]] = 3
# This should generate an error.
z: list[ClassVar[int]] = []
# This should generate an error because TypeVars cannot
# be used in a ClassVar.
illegal1: ClassVar[list[T]]
# This should generate an error because TypeVars cannot
# be used in a ClassVar.
illegal2: ClassVar[T]
# This should generate an error because Final cannot be
# used with a ClassVar.
illegal3: ClassVar[Final] = 0
# This should generate an error because Final cannot be
# used with a ClassVar. A second error is generated because
# Final[int] is not interpreted as a valid type.
illegal4: ClassVar[Final[int]] = 0
ok1: ClassVar[list]
ok2: ClassVar[list[Any]]
ok3: Annotated[ClassVar[list[Self]], ""]
# This should generate an error.
def func1(self, a: ClassVar[int]):
# This should generate an error.
x: ClassVar[str] = ""
# This should generate an error.
self.xx: ClassVar[str] = ""
# This should generate an error.
def func2(self) -> ClassVar[int]:
return 3
| Foo |
python | doocs__leetcode | solution/2900-2999/2926.Maximum Balanced Subsequence Sum/Solution.py | {
"start": 0,
"end": 394
} | class ____:
    def __init__(self, n: int):
        # 1-indexed Fenwick tree for prefix-max queries; c[i] holds the
        # maximum over the range of positions that index i covers.
        self.n = n
        self.c = [-inf] * (n + 1)
def update(self, x: int, v: int):
while x <= self.n:
self.c[x] = max(self.c[x], v)
x += x & -x
def query(self, x: int) -> int:
mx = -inf
while x:
mx = max(mx, self.c[x])
x -= x & -x
return mx
| BinaryIndexedTree |
python | huggingface__transformers | src/transformers/models/olmoe/modular_olmoe.py | {
"start": 10069,
"end": 11686
} | class ____(MixtralForCausalLM, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = OlmoeModel(config)
self.num_experts = config.num_experts
def forward(self, **super_kwargs):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, OlmoeForCausalLM
>>> model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m'
```
"""
return super().forward(**super_kwargs)
__all__ = ["OlmoeForCausalLM", "OlmoeModel", "OlmoePreTrainedModel"]
| OlmoeForCausalLM |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/environments.py | {
"start": 17163,
"end": 22839
} | class ____:
"""
Base build environment.
Base class for wrapping command execution for build steps. This class is in
charge of raising ``BuildAppError`` for internal application errors that
should be communicated to the user as a general unknown error and
``BuildUserError`` that will be exposed to the user with a proper message
for them to debug by themselves since they are _not_ a Read the Docs issue.
:param project: Project that is being built
:param version: Project version that is being built
:param build: Build instance
:param environment: shell environment variables
:param record: whether or not record a build commands in the databse via
the API. The only case where we want this to be `False` is when
instantiating this class from `sync_repository_task` because it's a
background task that does not expose commands to the user.
:param api_client: API v2 client instance (readthedocs.v2.client).
This is used to record commands in the database, if `record=True`
this argument is required.
"""
def __init__(
self,
project=None,
version=None,
build=None,
config=None,
environment=None,
record=True,
api_client=None,
**kwargs,
):
self.project = project
self._environment = environment or {}
self.commands = []
self.version = version
self.build = build
self.config = config
self.record = record
self.api_client = api_client
if self.record and not self.api_client:
raise ValueError("api_client is required when record=True")
# TODO: remove these methods, we are not using LocalEnvironment anymore. We
# need to find a way for tests to not require this anymore
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
return
def record_command(self, command):
if self.record:
command.save(self.api_client)
def run(self, *cmd, **kwargs):
"""Shortcut to run command from environment."""
return self.run_command_class(cls=self.command_class, cmd=cmd, **kwargs)
def run_command_class(
self, cls, cmd, warn_only=False, record=True, record_as_success=False, **kwargs
):
"""
Run command from this environment.
:param cls: command class to instantiate a command
:param cmd: command (as a list) to execute in this environment
:param record: whether or not to record this particular command
(``False`` implies ``warn_only=True``)
:param warn_only: don't raise an exception on command failure
:param record_as_success: force command ``exit_code`` to be saved as
``0`` (``True`` implies ``warn_only=True`` and ``record=True``)
"""
if not record:
warn_only = True
if record_as_success:
record = True
warn_only = True
# ``record_as_success`` is needed to instantiate the BuildCommand
kwargs.update({"record_as_success": record_as_success})
# Remove PATH from env, and set it to bin_path if it isn't passed in
environment = self._environment.copy()
env_path = environment.pop("BIN_PATH", None)
if "bin_path" not in kwargs and env_path:
kwargs["bin_path"] = env_path
if "environment" in kwargs:
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message="environment can't be passed in via commands.",
)
kwargs["environment"] = environment
kwargs["build_env"] = self
build_cmd = cls(cmd, **kwargs)
# Save the command that's running before it starts,
# then we will update the results after it has run.
if record:
self.record_command(build_cmd)
# We want append this command to the list of commands only if it has
# to be recorded in the database (to keep consistency) and also, it
# has to be added after ``self.record_command`` since its
# ``exit_code`` can be altered because of ``record_as_success``
self.commands.append(build_cmd)
build_cmd.run()
if record:
# TODO: I don't like how it's handled this entry point here since
# this class should know nothing about a BuildCommand (which are the
# only ones that can be saved/recorded)
self.record_command(build_cmd)
if build_cmd.failed:
if warn_only:
msg = "Command failed"
log.warning(
msg,
command=build_cmd.get_command(),
output=_truncate_output(build_cmd.output),
stderr=_truncate_output(build_cmd.error),
exit_code=build_cmd.exit_code,
project_slug=self.project.slug if self.project else "",
version_slug=self.version.slug if self.version else "",
)
elif build_cmd.exit_code == RTD_SKIP_BUILD_EXIT_CODE:
raise BuildCancelled(BuildCancelled.SKIPPED_EXIT_CODE_183)
else:
# TODO: for now, this still outputs a generic error message
# that is the same across all commands. We could improve this
# with more granular error messages that vary by the command
# being run.
raise BuildUserError(BuildUserError.GENERIC)
return build_cmd
| BaseBuildEnvironment |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 8616,
"end": 15292
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if arg == types.float16:
return signature(arg, arg)
def _genfp16_binary_comparison(l_key):
@register
class Cuda_fp16_cmp(ConcreteTemplate):
key = l_key
cases = [
signature(types.b1, types.float16, types.float16)
]
return Cuda_fp16_cmp
# If multiple ConcreteTemplates provide typing for a single function, then
# function resolution will pick the first compatible typing it finds even if it
# involves inserting a cast that would be considered undesirable (in this
# specific case, float16s could be cast to float32s for comparisons).
#
# To work around this, we instead use an AbstractTemplate that implements
# exactly the casting logic that we desire. The AbstractTemplate gets
# considered in preference to ConcreteTemplates during typing.
#
# This is tracked as Issue #7863 (https://github.com/numba/numba/issues/7863) -
# once this is resolved it should be possible to replace this AbstractTemplate
# with a ConcreteTemplate to simplify the logic.
def _fp16_binary_operator(l_key, retty):
@register_global(l_key)
class Cuda_fp16_operator(AbstractTemplate):
key = l_key
def generic(self, args, kws):
assert not kws
if len(args) == 2 and \
(args[0] == types.float16 or args[1] == types.float16):
if (args[0] == types.float16):
convertible = self.context.can_convert(args[1], args[0])
else:
convertible = self.context.can_convert(args[0], args[1])
# We allow three cases here:
#
# 1. fp16 to fp16 - Conversion.exact
# 2. fp16 to other types fp16 can be promoted to
# - Conversion.promote
# 3. fp16 to int8 (safe conversion) -
# - Conversion.safe
if (convertible == Conversion.exact) or \
(convertible == Conversion.promote) or \
(convertible == Conversion.safe):
return signature(retty, types.float16, types.float16)
return Cuda_fp16_operator
def _genfp16_comparison_operator(op):
return _fp16_binary_operator(op, types.b1)
def _genfp16_binary_operator(op):
return _fp16_binary_operator(op, types.float16)
Cuda_hadd = _genfp16_binary(cuda.fp16.hadd)
Cuda_add = _genfp16_binary_operator(operator.add)
Cuda_iadd = _genfp16_binary_operator(operator.iadd)
Cuda_hsub = _genfp16_binary(cuda.fp16.hsub)
Cuda_sub = _genfp16_binary_operator(operator.sub)
Cuda_isub = _genfp16_binary_operator(operator.isub)
Cuda_hmul = _genfp16_binary(cuda.fp16.hmul)
Cuda_mul = _genfp16_binary_operator(operator.mul)
Cuda_imul = _genfp16_binary_operator(operator.imul)
Cuda_hmax = _genfp16_binary(cuda.fp16.hmax)
Cuda_hmin = _genfp16_binary(cuda.fp16.hmin)
Cuda_hneg = _genfp16_unary(cuda.fp16.hneg)
Cuda_neg = _genfp16_unary_operator(operator.neg)
Cuda_habs = _genfp16_unary(cuda.fp16.habs)
Cuda_abs = _genfp16_unary_operator(abs)
Cuda_heq = _genfp16_binary_comparison(cuda.fp16.heq)
_genfp16_comparison_operator(operator.eq)
Cuda_hne = _genfp16_binary_comparison(cuda.fp16.hne)
_genfp16_comparison_operator(operator.ne)
Cuda_hge = _genfp16_binary_comparison(cuda.fp16.hge)
_genfp16_comparison_operator(operator.ge)
Cuda_hgt = _genfp16_binary_comparison(cuda.fp16.hgt)
_genfp16_comparison_operator(operator.gt)
Cuda_hle = _genfp16_binary_comparison(cuda.fp16.hle)
_genfp16_comparison_operator(operator.le)
Cuda_hlt = _genfp16_binary_comparison(cuda.fp16.hlt)
_genfp16_comparison_operator(operator.lt)
_genfp16_binary_operator(operator.truediv)
_genfp16_binary_operator(operator.itruediv)
def _resolve_wrapped_unary(fname):
decl = declare_device_function_template(f'__numba_wrapper_{fname}',
types.float16,
(types.float16,))
return types.Function(decl)
def _resolve_wrapped_binary(fname):
decl = declare_device_function_template(f'__numba_wrapper_{fname}',
types.float16,
(types.float16, types.float16,))
return types.Function(decl)
hsin_device = _resolve_wrapped_unary('hsin')
hcos_device = _resolve_wrapped_unary('hcos')
hlog_device = _resolve_wrapped_unary('hlog')
hlog10_device = _resolve_wrapped_unary('hlog10')
hlog2_device = _resolve_wrapped_unary('hlog2')
hexp_device = _resolve_wrapped_unary('hexp')
hexp10_device = _resolve_wrapped_unary('hexp10')
hexp2_device = _resolve_wrapped_unary('hexp2')
hsqrt_device = _resolve_wrapped_unary('hsqrt')
hrsqrt_device = _resolve_wrapped_unary('hrsqrt')
hfloor_device = _resolve_wrapped_unary('hfloor')
hceil_device = _resolve_wrapped_unary('hceil')
hrcp_device = _resolve_wrapped_unary('hrcp')
hrint_device = _resolve_wrapped_unary('hrint')
htrunc_device = _resolve_wrapped_unary('htrunc')
hdiv_device = _resolve_wrapped_binary('hdiv')
# generate atomic operations
def _gen(l_key, supported_types):
@register
class Cuda_atomic(AbstractTemplate):
key = l_key
def generic(self, args, kws):
assert not kws
ary, idx, val = args
if ary.dtype not in supported_types:
return
if ary.ndim == 1:
return signature(ary.dtype, ary, types.intp, ary.dtype)
elif ary.ndim > 1:
return signature(ary.dtype, ary, idx, ary.dtype)
return Cuda_atomic
all_numba_types = (types.float64, types.float32,
types.int32, types.uint32,
types.int64, types.uint64)
integer_numba_types = (types.int32, types.uint32,
types.int64, types.uint64)
unsigned_int_numba_types = (types.uint32, types.uint64)
Cuda_atomic_add = _gen(cuda.atomic.add, all_numba_types)
Cuda_atomic_sub = _gen(cuda.atomic.sub, all_numba_types)
Cuda_atomic_max = _gen(cuda.atomic.max, all_numba_types)
Cuda_atomic_min = _gen(cuda.atomic.min, all_numba_types)
Cuda_atomic_nanmax = _gen(cuda.atomic.nanmax, all_numba_types)
Cuda_atomic_nanmin = _gen(cuda.atomic.nanmin, all_numba_types)
Cuda_atomic_and = _gen(cuda.atomic.and_, integer_numba_types)
Cuda_atomic_or = _gen(cuda.atomic.or_, integer_numba_types)
Cuda_atomic_xor = _gen(cuda.atomic.xor, integer_numba_types)
Cuda_atomic_inc = _gen(cuda.atomic.inc, unsigned_int_numba_types)
Cuda_atomic_dec = _gen(cuda.atomic.dec, unsigned_int_numba_types)
Cuda_atomic_exch = _gen(cuda.atomic.exch, integer_numba_types)
@register
| Float |
python | huggingface__transformers | src/transformers/models/granite_speech/processing_granite_speech.py | {
"start": 1024,
"end": 3732
} | class ____(ProcessorMixin):
def __init__(
self,
audio_processor,
tokenizer,
audio_token="<|audio|>",
chat_template=None,
):
self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
super().__init__(audio_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
audio: Union["torch.Tensor", list["torch.Tensor"]] = None,
device: str = "cpu",
**kwargs,
) -> BatchFeature:
requires_backends(self, ["torch"])
text = self._get_validated_text(text)
prompt_strings = text
if audio is not None:
# NOTE - we intentionally avoid throwing for potentially misaligned
# text / audio inputs here because some inference engines will
# trigger the conditions due to the way they call multimodal
# processors, e.g., vLLM.
audio_inputs = self.audio_processor(audio, device=device)
# TODO (@alex-jw-brooks); we should add a util to get_num_audio_tokens
# from feature lengths and call it here, rather than returning it
# from the feature extractor.
audio_embed_sizes = audio_inputs.pop("audio_embed_sizes")
# Expand the audio placeholders to match the feature dims; this
# is similar to how many VLMs handle image tokens, e.g., llava next
prompt_strings = []
num_replaced = 0
for sample in text:
while self.audio_token in sample:
sample = sample.replace(
self.audio_token,
"<placeholder>" * audio_embed_sizes[num_replaced],
1,
)
num_replaced += 1
prompt_strings.append(sample)
prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings]
else:
audio_inputs = {}
if "padding" not in kwargs:
kwargs["padding"] = True
text_inputs = self.tokenizer(prompt_strings, **kwargs)
return BatchFeature(data={**text_inputs, **audio_inputs})
def _get_validated_text(self, text: Union[str, list]) -> list[str]:
if isinstance(text, str):
return [text]
elif isinstance(text, list) and isinstance(text[0], str):
return text
raise TypeError("Invalid text provided! Text should be a string or list of strings.")
__all__ = ["GraniteSpeechProcessor"]
| GraniteSpeechProcessor |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/variables.py | {
"start": 1153,
"end": 1861
} | class ____(BaseModel):
"""Variable serializer for responses."""
key: str
val: str = Field(alias="value")
description: str | None
is_encrypted: bool
team_id: UUID | None
@model_validator(mode="after")
def redact_val(self) -> Self:
if self.val is None:
return self
try:
val_dict = json.loads(self.val)
redacted_dict = redact(val_dict, max_depth=1)
self.val = json.dumps(redacted_dict)
return self
except json.JSONDecodeError:
# value is not a serialized string representation of a dict.
self.val = str(redact(self.val, self.key))
return self
| VariableResponse |
python | Textualize__textual | src/textual/widgets/_tree.py | {
"start": 1671,
"end": 1795
} | class ____(Exception):
"""Exception raised when there is an error with a request to add a node."""
@dataclass
| AddNodeError |
python | pyca__cryptography | tests/x509/verification/test_verification.py | {
"start": 946,
"end": 3914
} | class ____:
def test_time_already_set(self):
with pytest.raises(ValueError):
PolicyBuilder().time(datetime.datetime.now()).time(
datetime.datetime.now()
)
def test_store_already_set(self):
with pytest.raises(ValueError):
PolicyBuilder().store(dummy_store()).store(dummy_store())
def test_max_chain_depth_already_set(self):
with pytest.raises(ValueError):
PolicyBuilder().max_chain_depth(8).max_chain_depth(9)
def test_ipaddress_subject(self):
verifier = (
PolicyBuilder()
.store(dummy_store())
.build_server_verifier(IPAddress(IPv4Address("0.0.0.0")))
)
assert verifier.policy.subject == IPAddress(IPv4Address("0.0.0.0"))
def test_dnsname_subject(self):
verifier = (
PolicyBuilder()
.store(dummy_store())
.build_server_verifier(DNSName("cryptography.io"))
)
assert verifier.policy.subject == DNSName("cryptography.io")
def test_subject_bad_types(self):
# Subject must be a supported GeneralName type
with pytest.raises(TypeError):
PolicyBuilder().store(dummy_store()).build_server_verifier(
"cryptography.io" # type: ignore[arg-type]
)
with pytest.raises(TypeError):
PolicyBuilder().store(dummy_store()).build_server_verifier(
"0.0.0.0" # type: ignore[arg-type]
)
with pytest.raises(TypeError):
PolicyBuilder().store(dummy_store()).build_server_verifier(
IPv4Address("0.0.0.0") # type: ignore[arg-type]
)
with pytest.raises(TypeError):
PolicyBuilder().store(dummy_store()).build_server_verifier(None) # type: ignore[arg-type]
def test_builder_pattern(self):
now = datetime.datetime.now().replace(microsecond=0)
store = dummy_store()
max_chain_depth = 16
builder = PolicyBuilder()
builder = builder.time(now)
builder = builder.store(store)
builder = builder.max_chain_depth(max_chain_depth)
subject = DNSName("cryptography.io")
verifier = builder.build_server_verifier(subject)
assert verifier.policy.subject == subject
assert verifier.policy.validation_time == now
assert verifier.policy.max_chain_depth == max_chain_depth
assert (
verifier.policy.extended_key_usage
== ExtendedKeyUsageOID.SERVER_AUTH
)
assert (
verifier.policy.minimum_rsa_modulus == WEBPKI_MINIMUM_RSA_MODULUS
)
assert verifier.store == store
def test_build_server_verifier_missing_store(self):
with pytest.raises(
ValueError, match="A server verifier must have a trust store"
):
PolicyBuilder().build_server_verifier(DNSName("cryptography.io"))
| TestPolicyBuilder |
python | huggingface__transformers | src/transformers/models/mlcd/modeling_mlcd.py | {
"start": 13189,
"end": 15160
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = MLCDAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = MLCDMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
Represents the hidden states from the previous layer or the input embeddings.
position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
A tuple of two tensors, each of shape `(batch, seq_len, embed_dim)`.
Represents absolute positional embeddings for the query and key in the attention mechanism.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| MLCDEncoderLayer |
python | pypa__pip | src/pip/_vendor/rich/jupyter.py | {
"start": 514,
"end": 1094
} | class ____:
"""A shim to write html to Jupyter notebook."""
def __init__(self, html: str, text: str) -> None:
self.html = html
self.text = text
def _repr_mimebundle_(
self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any
) -> Dict[str, str]:
data = {"text/plain": self.text, "text/html": self.html}
if include:
data = {k: v for (k, v) in data.items() if k in include}
if exclude:
data = {k: v for (k, v) in data.items() if k not in exclude}
return data
| JupyterRenderable |
python | huggingface__transformers | src/transformers/models/modernbert/configuration_modernbert.py | {
"start": 1362,
"end": 13682
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base.
e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ModernBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
layer_types (`list`, *optional*):
Attention pattern for each layer.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
local_attention (`int`, *optional*, defaults to 128):
The window size for local attention.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_pooling (`str`, *optional*, defaults to `"cls"`):
The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
CLS token doesn't attend to all tokens on long sequences.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
sparse_prediction (`bool`, *optional*, defaults to `False`):
Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
The index to ignore for the sparse prediction.
reference_compile (`bool`, *optional*):
Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
be faster in some scenarios.
repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.
Examples:
```python
>>> from transformers import ModernBertModel, ModernBertConfig
>>> # Initializing a ModernBert style configuration
>>> configuration = ModernBertConfig()
>>> # Initializing a model from the modernbert-base style configuration
>>> model = ModernBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "modernbert"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = {"global": 160_000.0, "local": 10_000.0}
def __init__(
self,
vocab_size: Optional[int] = 50368,
hidden_size: Optional[int] = 768,
intermediate_size: Optional[int] = 1152,
num_hidden_layers: Optional[int] = 22,
num_attention_heads: Optional[int] = 12,
hidden_activation: Optional[str] = "gelu",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
initializer_cutoff_factor: Optional[float] = 2.0,
norm_eps: Optional[int] = 1e-5,
norm_bias: Optional[bool] = False,
pad_token_id: Optional[int] = 50283,
eos_token_id: Optional[int] = 50282,
bos_token_id: Optional[int] = 50281,
cls_token_id: Optional[int] = 50281,
sep_token_id: Optional[int] = 50282,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
layer_types: Optional[list[str]] = None,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
local_attention: Optional[int] = 128,
embedding_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
mlp_dropout: Optional[float] = 0.0,
decoder_bias: Optional[bool] = True,
classifier_pooling: Literal["cls", "mean"] = "cls",
classifier_dropout: Optional[float] = 0.0,
classifier_bias: Optional[bool] = False,
classifier_activation: Optional[str] = "gelu",
deterministic_flash_attn: Optional[bool] = False,
sparse_prediction: Optional[bool] = False,
sparse_pred_ignore_index: Optional[int] = -100,
reference_compile: Optional[bool] = None,
repad_logits_with_grad: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.initializer_range = initializer_range
self.initializer_cutoff_factor = initializer_cutoff_factor
self.norm_eps = norm_eps
self.norm_bias = norm_bias
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.local_attention = local_attention
self.embedding_dropout = embedding_dropout
self.mlp_bias = mlp_bias
self.mlp_dropout = mlp_dropout
self.decoder_bias = decoder_bias
self.classifier_pooling = classifier_pooling
self.classifier_dropout = classifier_dropout
self.classifier_bias = classifier_bias
self.classifier_activation = classifier_activation
self.deterministic_flash_attn = deterministic_flash_attn
self.sparse_prediction = sparse_prediction
self.sparse_pred_ignore_index = sparse_pred_ignore_index
self.reference_compile = reference_compile
self.repad_logits_with_grad = repad_logits_with_grad
if self.classifier_pooling not in ["cls", "mean"]:
raise ValueError(
f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.'
)
self.layer_types = layer_types
# BC -> the pattern used to be a simple int, and it's still present in configs on the Hub
self.global_attn_every_n_layers = kwargs.get("global_attn_every_n_layers", 3)
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if bool(i % self.global_attn_every_n_layers) else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
cls_token_id=cls_token_id,
sep_token_id=sep_token_id,
**kwargs,
)
def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
rope_scaling = kwargs.pop("rope_scaling", None)
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
# as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
default_rope_params = {
"sliding_attention": {"rope_type": "default"},
"full_attention": {"rope_type": "default"},
}
self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
if rope_scaling is not None:
self.rope_parameters["full_attention"].update(rope_scaling)
self.rope_parameters["sliding_attention"].update(rope_scaling)
self.rope_parameters["full_attention"].setdefault(
"rope_theta", kwargs.pop("global_rope_theta", self.default_theta["global"])
)
self.rope_parameters["sliding_attention"].setdefault(
"rope_theta", kwargs.pop("local_rope_theta", self.default_theta["local"])
)
# Standardize and validate the correctness of rotary position embeddings parameters
self.standardize_rope_params()
self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
return kwargs
def to_dict(self):
output = super().to_dict()
output.pop("reference_compile", None)
return output
__all__ = ["ModernBertConfig"]
| ModernBertConfig |
python | django__django | tests/admin_inlines/admin.py | {
"start": 1290,
"end": 1411
} | class ____(admin.TabularInline):
model = NonAutoPKBookChild
classes = ("collapse",)
| NonAutoPKBookChildTabularInline |
python | ipython__ipython | IPython/core/ultratb.py | {
"start": 16211,
"end": 37808
} | class ____(TBTools):
"""A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
of HTML. Requires inspect and pydoc. Crazy, man.
Modified version which optionally strips the topmost entries from the
traceback, to be used with alternate interpreters (because their own code
would appear in the traceback)."""
tb_highlight = "bg:ansiyellow"
tb_highlight_style = "default"
_mode: str
def __init__(
self,
# TODO: no default ?
theme_name: str = _default,
call_pdb: bool = False,
ostream: Any = None,
tb_offset: int = 0,
long_header: bool = False,
include_vars: bool = True,
check_cache: Callable[[], None] | None = None,
debugger_cls: type | None = None,
*,
color_scheme: Any = _sentinel,
):
"""Specify traceback offset, headers and color scheme.
Define how many frames to drop from the tracebacks. Calling it with
tb_offset=1 allows use of this handler in interpreters which will have
their own code at the top of the traceback (VerboseTB will first
remove that frame before printing the traceback info)."""
if color_scheme is not _sentinel:
assert isinstance(color_scheme, str)
theme_name = color_scheme.lower()
warnings.warn(
"color_scheme is deprecated as of IPython 9.0 and replaced by "
"theme_name (which should be lowercase). As you passed a "
"color_scheme value I will try to see if I have corresponding "
"theme.",
stacklevel=2,
category=DeprecationWarning,
)
if theme_name != _default:
warnings.warn(
"You passed both `theme_name` and `color_scheme` "
"(deprecated) to VerboseTB constructor. `theme_name` will "
"be ignored for the time being.",
stacklevel=2,
category=DeprecationWarning,
)
if theme_name == _default:
theme_name = "linux"
assert isinstance(theme_name, str)
super().__init__(
theme_name=theme_name,
call_pdb=call_pdb,
ostream=ostream,
debugger_cls=debugger_cls,
)
self.tb_offset = tb_offset
self.long_header = long_header
self.include_vars = include_vars
# By default we use linecache.checkcache, but the user can provide a
# different check_cache implementation. This was formerly used by the
# IPython kernel for interactive code, but is no longer necessary.
if check_cache is None:
check_cache = linecache.checkcache
self.check_cache = check_cache
self.skip_hidden = True
def format_record(self, frame_info: FrameInfo) -> str:
"""Format a single stack frame"""
assert isinstance(frame_info, FrameInfo)
if isinstance(frame_info._sd, stack_data.RepeatedFrames):
return theme_table[self._theme_name].format(
[
(Token, " "),
(
Token.ExcName,
"[... skipping similar frames: %s]" % frame_info.description,
),
(Token, "\n"),
]
)
indent: str = " " * INDENT_SIZE
assert isinstance(frame_info.lineno, int)
args, varargs, varkw, locals_ = inspect.getargvalues(frame_info.frame)
if frame_info.executing is not None:
func = frame_info.executing.code_qualname()
else:
func = "?"
if func == "<module>":
call = ""
else:
# Decide whether to include variable details or not
var_repr = eqrepr if self.include_vars else nullrepr
try:
scope = inspect.formatargvalues(
args, varargs, varkw, locals_, formatvalue=var_repr
)
assert isinstance(scope, str)
call = theme_table[self._theme_name].format(
[(Token, "in "), (Token.VName, func), (Token.ValEm, scope)]
)
except KeyError:
# This happens in situations like errors inside generator
# expressions, where local variables are listed in the
# line, but can't be extracted from the frame. I'm not
# 100% sure this isn't actually a bug in inspect itself,
# but since there's no info for us to compute with, the
# best we can do is report the failure and move on. Here
# we must *not* call any traceback construction again,
# because that would mess up use of %debug later on. So we
# simply report the failure and move on. The only
# limitation will be that this frame won't have locals
# listed in the call signature. Quite subtle problem...
# I can't think of a good way to validate this in a unit
# test, but running a script consisting of:
# dict( (k,v.strip()) for (k,v) in range(10) )
# will illustrate the error, if this exception catch is
# disabled.
call = theme_table[self._theme_name].format(
[
(Token, "in "),
(Token.VName, func),
(Token.ValEm, "(***failed resolving arguments***)"),
]
)
lvals_toks: list[TokenStream] = []
if self.include_vars:
try:
# we likely want to fix stackdata at some point, but
# still need a workaround.
fibp = frame_info.variables_in_executing_piece
for var in fibp:
lvals_toks.append(
[
(Token, var.name),
(Token, " "),
(Token.ValEm, "= "),
(Token.ValEm, repr(var.value)),
]
)
except Exception:
lvals_toks.append(
[
(
Token,
"Exception trying to inspect frame. No more locals available.",
),
]
)
if frame_info._sd is None:
# fast fallback if file is too long
assert frame_info.filename is not None
level_tokens = [
(Token.FilenameEm, util_path.compress_user(frame_info.filename)),
(Token, " "),
(Token, call),
(Token, "\n"),
]
_line_format = Parser(theme_name=self._theme_name).format2
assert isinstance(frame_info.code, types.CodeType)
first_line: int = frame_info.code.co_firstlineno
current_line: int = frame_info.lineno
raw_lines: list[str] = frame_info.raw_lines
index: int = current_line - first_line
assert frame_info.context is not None
if index >= frame_info.context:
start = max(index - frame_info.context, 0)
stop = index + frame_info.context
index = frame_info.context
else:
start = 0
stop = index + frame_info.context
raw_lines = raw_lines[start:stop]
# Jan 2025: may need _line_format(py3ompat.cast_unicode(s))
raw_color_err = []
for s in raw_lines:
formatted, is_error = _line_format(s, "str")
assert formatted is not None, "format2 should return str when out='str'"
raw_color_err.append((s, (formatted, is_error)))
tb_tokens = _simple_format_traceback_lines(
current_line,
index,
raw_color_err,
lvals_toks,
theme=theme_table[self._theme_name],
)
_tb_lines: str = theme_table[self._theme_name].format(tb_tokens)
return theme_table[self._theme_name].format(level_tokens + tb_tokens)
else:
result = theme_table[self._theme_name].format(
_tokens_filename(True, frame_info.filename, lineno=frame_info.lineno)
)
result += ", " if call else ""
result += f"{call}\n"
result += theme_table[self._theme_name].format(
_format_traceback_lines(
frame_info.lines,
theme_table[self._theme_name],
self.has_colors,
lvals_toks,
)
)
return result
def prepare_header(self, etype: str, long_version: bool = False) -> str:
width = min(75, get_terminal_size()[0])
if long_version:
# Header with the exception type, python version, and date
pyver = "Python " + sys.version.split()[0] + ": " + sys.executable
date = time.ctime(time.time())
theme = theme_table[self._theme_name]
head = theme.format(
[
(Token.Topline, theme.symbols["top_line"] * width),
(Token, "\n"),
(Token.ExcName, etype),
(Token, " " * (width - len(etype) - len(pyver))),
(Token, pyver),
(Token, "\n"),
(Token, date.rjust(width)),
]
)
head += (
"\nA problem occurred executing Python code. Here is the sequence of function"
"\ncalls leading up to the error, with the most recent (innermost) call last."
)
else:
# Simplified header
head = theme_table[self._theme_name].format(
[
(Token.ExcName, etype),
(
Token,
"Traceback (most recent call last)".rjust(width - len(etype)),
),
]
)
return head
def format_exception(self, etype, evalue):
# Get (safely) a string form of the exception info
try:
etype_str, evalue_str = map(str, (etype, evalue))
except:
# User exception is improperly defined.
etype, evalue = str, sys.exc_info()[:2]
etype_str, evalue_str = map(str, (etype, evalue))
# PEP-678 notes
notes = getattr(evalue, "__notes__", [])
if not isinstance(notes, Sequence) or isinstance(notes, (str, bytes)):
notes = [_safe_string(notes, "__notes__", func=repr)]
for note in notes:
assert isinstance(note, str)
str_notes: Sequence[str] = notes
# ... and format it
return [
theme_table[self._theme_name].format(
[(Token.ExcName, etype_str), (Token, ": "), (Token, evalue_str)]
),
*(
theme_table[self._theme_name].format([(Token, note)])
for note in str_notes
),
]
def format_exception_as_a_whole(
self,
etype: type,
evalue: Optional[BaseException],
etb: Optional[TracebackType],
context: int,
tb_offset: Optional[int],
) -> list[list[str]]:
"""Formats the header, traceback and exception message for a single exception.
This may be called multiple times by Python 3 exception chaining
(PEP 3134).
"""
# some locals
orig_etype = etype
try:
etype = etype.__name__ # type: ignore[assignment]
except AttributeError:
pass
tb_offset = self.tb_offset if tb_offset is None else tb_offset
assert isinstance(tb_offset, int)
head = self.prepare_header(str(etype), self.long_header)
records = self.get_records(etb, context, tb_offset) if etb else []
frames = []
skipped = 0
lastrecord = len(records) - 1
for i, record in enumerate(records):
if (
not isinstance(record._sd, stack_data.RepeatedFrames)
and self.skip_hidden
):
if (
record.frame.f_locals.get("__tracebackhide__", 0)
and i != lastrecord
):
skipped += 1
continue
if skipped:
frames.append(
theme_table[self._theme_name].format(
[
(Token, " "),
(Token.ExcName, "[... skipping hidden %s frame]" % skipped),
(Token, "\n"),
]
)
)
skipped = 0
frames.append(self.format_record(record))
if skipped:
frames.append(
theme_table[self._theme_name].format(
[
(Token, " "),
(Token.ExcName, "[... skipping hidden %s frame]" % skipped),
(Token, "\n"),
]
)
)
formatted_exception = self.format_exception(etype, evalue)
if records:
frame_info = records[-1]
ipinst = get_ipython()
if ipinst is not None:
ipinst.hooks.synchronize_with_editor(
frame_info.filename, frame_info.lineno, 0
)
return [[head] + frames + formatted_exception]
def get_records(self, etb: TracebackType, context: int, tb_offset: int) -> Any:
assert etb is not None
context = context - 1
after = context // 2
before = context - after
if self.has_colors:
base_style = theme_table[self._theme_name].as_pygments_style()
style = stack_data.style_with_executing_node(base_style, self.tb_highlight)
formatter = Terminal256Formatter(style=style)
else:
formatter = None
options = stack_data.Options(
before=before,
after=after,
pygments_formatter=formatter,
)
# Let's estimate the amount of code we will have to parse/highlight.
cf: Optional[TracebackType] = etb
max_len = 0
tbs = []
while cf is not None:
try:
mod = inspect.getmodule(cf.tb_frame)
if mod is not None:
mod_name = mod.__name__
root_name, *_ = mod_name.split(".")
if root_name == "IPython":
cf = cf.tb_next
continue
max_len = get_line_number_of_frame(cf.tb_frame)
except OSError:
max_len = 0
max_len = max(max_len, max_len)
tbs.append(cf)
cf = getattr(cf, "tb_next", None)
if max_len > FAST_THRESHOLD:
FIs: list[FrameInfo] = []
for tb in tbs:
frame = tb.tb_frame # type: ignore[union-attr]
lineno = frame.f_lineno
code = frame.f_code
filename = code.co_filename
# TODO: Here we need to use before/after/
FIs.append(
FrameInfo(
"Raw frame", filename, lineno, frame, code, context=context
)
)
return FIs
res = list(stack_data.FrameInfo.stack_data(etb, options=options))[tb_offset:]
res2 = [FrameInfo._from_stack_data_FrameInfo(r) for r in res]
return res2
def structured_traceback(
self,
etype: type,
evalue: Optional[BaseException],
etb: Optional[TracebackType] = None,
tb_offset: Optional[int] = None,
context: int = 5,
) -> list[str]:
"""Return a nice text document describing the traceback."""
formatted_exceptions: list[list[str]] = self.format_exception_as_a_whole(
etype, evalue, etb, context, tb_offset
)
termsize = min(75, get_terminal_size()[0])
theme = theme_table[self._theme_name]
head: str = theme.format(
[
(
Token.Topline,
theme.symbols["top_line"] * termsize,
),
]
)
structured_traceback_parts: list[str] = [head]
chained_exceptions_tb_offset = 0
lines_of_context = 3
exception = self.get_parts_of_chained_exception(evalue)
if exception:
assert evalue is not None
formatted_exceptions += self.prepare_chained_exception_message(
evalue.__cause__
)
etype, evalue, etb = exception
else:
evalue = None
chained_exc_ids = set()
while evalue:
formatted_exceptions += self.format_exception_as_a_whole(
etype, evalue, etb, lines_of_context, chained_exceptions_tb_offset
)
exception = self.get_parts_of_chained_exception(evalue)
if exception and id(exception[1]) not in chained_exc_ids:
chained_exc_ids.add(
id(exception[1])
) # trace exception to avoid infinite 'cause' loop
formatted_exceptions += self.prepare_chained_exception_message(
evalue.__cause__
)
etype, evalue, etb = exception
else:
evalue = None
# we want to see exceptions in a reversed order:
# the first exception should be on top
for fx in reversed(formatted_exceptions):
structured_traceback_parts += fx
return structured_traceback_parts
def debugger(self, force: bool = False) -> None:
"""Call up the pdb debugger if desired, always clean up the tb
reference.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
If the call_pdb flag is set, the pdb interactive debugger is
invoked. In all cases, the self.tb reference to the current traceback
is deleted to prevent lingering references which hamper memory
management.
Note that each call to pdb() does an 'import readline', so if your app
requires a special setup for the readline completers, you'll have to
fix that by hand after invoking the exception handler."""
if force or self.call_pdb:
if self.pdb is None:
self.pdb = self.debugger_cls()
# the system displayhook may have changed, restore the original
# for pdb
display_trap = DisplayTrap(hook=sys.__displayhook__)
with display_trap:
self.pdb.reset()
# Find the right frame so we don't pop up inside ipython itself
if hasattr(self, "tb") and self.tb is not None: # type: ignore[has-type]
etb = self.tb # type: ignore[has-type]
else:
etb = self.tb = sys.last_traceback
while self.tb is not None and self.tb.tb_next is not None:
assert self.tb.tb_next is not None
self.tb = self.tb.tb_next
if etb and etb.tb_next:
etb = etb.tb_next
self.pdb.botframe = etb.tb_frame
# last_value should be deprecated, but last-exc sometimme not set
# please check why later and remove the getattr.
exc = (
sys.last_value
if sys.version_info < (3, 12)
else getattr(sys, "last_exc", sys.last_value)
) # type: ignore[attr-defined]
if exc:
self.pdb.interaction(None, exc)
else:
self.pdb.interaction(None, etb)
if hasattr(self, "tb"):
del self.tb
def handler(self, info=None):
(etype, evalue, etb) = info or sys.exc_info()
self.tb = etb
ostream = self.ostream
ostream.flush()
ostream.write(self.text(etype, evalue, etb)) # type:ignore[arg-type]
ostream.write("\n")
ostream.flush()
# Changed so an instance can just be called as VerboseTB_inst() and print
# out the right info on its own.
def __call__(self, etype=None, evalue=None, etb=None):
"""This hook can replace sys.excepthook (for Python 2.1 or higher)."""
if etb is None:
self.handler()
else:
self.handler((etype, evalue, etb))
try:
self.debugger()
except KeyboardInterrupt:
print("\nKeyboardInterrupt")
# ----------------------------------------------------------------------------
| VerboseTB |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 69898,
"end": 71073
} | class ____(TestCase):
def test_cpu_get_ufunc_info(self):
# The CPU context defines get_ufunc_info that is the same as
# ufunc_db.get_ufunc_info.
targetctx = cpu_target.target_context
# Check: get_ufunc_info returns a dict
add_info = targetctx.get_ufunc_info(np.add)
self.assertIsInstance(add_info, dict)
# Check: it is the same as ufunc_db.get_ufunc_info
expected = ufunc_db.get_ufunc_info(np.add)
self.assertEqual(add_info, expected)
# Check: KeyError raised on bad key
badkey = object()
with self.assertRaises(KeyError) as raises:
ufunc_db.get_ufunc_info(badkey)
self.assertEqual(raises.exception.args, (badkey,))
def test_base_get_ufunc_info(self):
# The BaseContext always raises NotImplementedError
targetctx = BaseContext(cpu_target.typing_context, 'cpu')
with self.assertRaises(NotImplementedError) as raises:
targetctx.get_ufunc_info(np.add)
self.assertRegex(
str(raises.exception),
r"<numba\..*\.BaseContext object at .*> does not support ufunc",
)
| TestUfuncOnContext |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/appflow.py | {
"start": 11167,
"end": 13551
} | class ____(AppflowBaseOperator):
"""
Execute an AppFlow run after updating the filters to select only future data.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunAfterOperator`
:param source: The source name (Supported: salesforce, zendesk)
:param flow_name: The flow name
:param source_field: The field name to apply filters
:param filter_date: The date value (or template) to be used in filters.
:param poll_interval: how often in seconds to check the query status
:param wait_for_completion: whether to wait for the run to end to return
"""
def __init__(
self,
source: str,
flow_name: str,
source_field: str,
filter_date: str,
poll_interval: int = 20,
wait_for_completion: bool = True,
**kwargs,
) -> None:
if not filter_date:
raise ValueError(MANDATORY_FILTER_DATE_MSG.format(entity="AppflowRunAfterOperator"))
if source not in {"salesforce", "zendesk"}:
raise ValueError(NOT_SUPPORTED_SOURCE_MSG.format(source=source, entity="AppflowRunAfterOperator"))
super().__init__(
source=source,
flow_name=flow_name,
flow_update=True,
source_field=source_field,
filter_date=filter_date,
poll_interval=poll_interval,
wait_for_completion=wait_for_completion,
**kwargs,
)
def _update_flow(self) -> None:
if not self.filter_date_parsed:
raise ValueError(f"Invalid filter_date argument parser value: {self.filter_date_parsed}")
if not self.source_field:
raise ValueError(f"Invalid source_field argument value: {self.source_field}")
filter_task: TaskTypeDef = {
"taskType": "Filter",
"connectorOperator": {self.connector_type: "GREATER_THAN"}, # type: ignore
"sourceFields": [self.source_field],
"taskProperties": {
"DATA_TYPE": "datetime",
"VALUE": str(datetime_to_epoch_ms(self.filter_date_parsed)),
}, # NOT inclusive
}
self.hook.update_flow_filter(
flow_name=self.flow_name, filter_tasks=[filter_task], set_trigger_ondemand=True
)
| AppflowRunAfterOperator |
python | pytorch__pytorch | test/functorch/test_ac_logging.py | {
"start": 438,
"end": 6506
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.graph: MagicMock = MagicMock(spec=Graph)
self.node1: MagicMock = MagicMock(spec=Node)
self.node2: MagicMock = MagicMock(spec=Node)
self.node1.name = "node1"
self.node1.target = "target1"
self.node1.meta = {
"tensor_meta": MagicMock(shape=(2, 2)),
"stack_trace": "trace1",
}
self.node1.all_input_nodes = []
self.node2.name = "node2"
self.node2.target = "target2"
self.node2.meta = {"tensor_meta": None, "stack_trace": "trace2"}
self.node2.all_input_nodes = [self.node1]
self.graph.nodes = [self.node1, self.node2]
self.all_recomputable_banned_nodes: list[Node] = [self.node1]
self.saved_node_idxs: list[int] = [0]
self.recomputable_node_idxs: list[int] = []
self.expected_runtime: int = 100
self.memories_banned_nodes: list[int] = [50]
self.normalized_memories_banned_nodes: list[float] = [0.10344827586206896]
self.runtimes_banned_nodes: list[int] = [10]
self.min_cut_saved_values: list[Node] = [self.node1]
def test_create_joint_graph_node_information(self) -> None:
recomputable_node_info: dict[str, int] = {"node1": 0}
expected_output: dict[str, dict] = {
"node1": {
"index": 0,
"name": "node1",
"is_recomputable_candidate": True,
"target": "target1",
"shape": "(2, 2)",
"input_arguments": [],
"stack_trace": "trace1",
"recomputable_candidate_info": {"recomputable_node_idx": 0},
},
"node2": {
"index": 1,
"name": "node2",
"is_recomputable_candidate": False,
"target": "target2",
"shape": "[]",
"input_arguments": ["node1"],
"stack_trace": "trace2",
},
}
result = create_joint_graph_node_information(self.graph, recomputable_node_info)
self.assertEqual(result, expected_output)
def test_create_joint_graph_edges(self) -> None:
expected_edges: list[tuple[str, str]] = [("node1", "node2")]
result = create_joint_graph_edges(self.graph)
self.assertEqual(result, expected_edges)
def test_create_activation_checkpointing_logging_structure_payload(self) -> None:
input_joint_graph_node_information: dict[str, dict] = {
"node1": {
"index": 0,
"name": "node1",
"is_recomputable_candidate": True,
"target": "target1",
"shape": "(2, 2)",
"input_arguments": [],
"stack_trace": "trace1",
"recomputable_candidate_info": {"recomputable_node_idx": 0},
}
}
joint_graph_edges: list[tuple[str, str]] = [("node1", "node2")]
expected_payload: dict[str, any] = {
"Joint Graph Size": 2,
"Joint Graph Edges": {"Total": 1, "Edges": joint_graph_edges},
"Joint Graph Node Information": input_joint_graph_node_information,
"Recomputable Banned Nodes Order": ["node1"],
"Expected Runtime": self.expected_runtime,
"Knapsack Saved Nodes": self.saved_node_idxs,
"Knapsack Recomputed Nodes": self.recomputable_node_idxs,
"Knapsack Input Memories": self.normalized_memories_banned_nodes,
"Absolute Memories": self.memories_banned_nodes,
"Knapsack Input Runtimes": self.runtimes_banned_nodes,
"Min Cut Solution Saved Values": ["node1"],
}
result = create_activation_checkpointing_logging_structure_payload(
joint_graph=self.graph,
joint_graph_node_information=input_joint_graph_node_information,
joint_graph_edges=joint_graph_edges,
all_recomputable_banned_nodes=self.all_recomputable_banned_nodes,
expected_runtime=self.expected_runtime,
saved_node_idxs=self.saved_node_idxs,
recomputable_node_idxs=self.recomputable_node_idxs,
memories_banned_nodes=self.memories_banned_nodes,
normalized_memories_banned_nodes=self.normalized_memories_banned_nodes,
runtimes_banned_nodes=self.runtimes_banned_nodes,
min_cut_saved_values=self.min_cut_saved_values,
)
self.assertEqual(result, expected_payload)
@patch(
"torch._functorch._activation_checkpointing.ac_logging_utils.trace_structured"
)
@patch("json.dumps", return_value="mocked_payload")
def test_create_structured_trace_for_min_cut_info(
self, mock_json_dumps: MagicMock, mock_trace_structured: MagicMock
) -> None:
create_structured_trace_for_min_cut_info(
joint_graph=self.graph,
all_recomputable_banned_nodes=self.all_recomputable_banned_nodes,
saved_node_idxs=self.saved_node_idxs,
recomputable_node_idxs=self.recomputable_node_idxs,
expected_runtime=self.expected_runtime,
memories_banned_nodes=self.memories_banned_nodes,
normalized_memories_banned_nodes=self.normalized_memories_banned_nodes,
runtimes_banned_nodes=self.runtimes_banned_nodes,
min_cut_saved_values=self.min_cut_saved_values,
)
self.assertEqual(mock_trace_structured.call_count, 1)
metadata_fn_result = mock_trace_structured.call_args[1]["metadata_fn"]()
payload_fn_result = mock_trace_structured.call_args[1]["payload_fn"]()
self.assertEqual(
metadata_fn_result,
{
"name": "min_cut_information",
"encoding": "json",
},
)
self.assertEqual(payload_fn_result, "mocked_payload")
mock_json_dumps.assert_called_once()
if __name__ == "__main__":
run_tests()
| TestAcLogging |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 23488,
"end": 29839
} | class ____(KyutaiSpeechToTextAttention):
"""
KyutaiSpeechToText flash attention module. This module inherits from `KyutaiSpeechToTextAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if isinstance(past_key_values, StaticCache):
raise ValueError(
"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
)
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states, cache_position) # Ignore copy
key_states = self.k_proj(hidden_states, cache_position) # Ignore copy
value_states = self.v_proj(hidden_states, cache_position) # Ignore copy
# Flash attention requires the input to have the shape
# batch_size x seq_length x head_dim x hidden_dim
# therefore we just need to keep the original shape
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if self.rotary_emb is not None: # Ignore copy
cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = (
{"sin": sin, "cos": cos, "cache_position": cache_position}
if self.rotary_emb is not None
else {"cache_position": cache_position}
) # Ignore copy
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in the correct dtype just to be sure everything works as expected.
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
# in fp32. (KyutaiSpeechToTextRMSNorm handles it correctly)
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != "mps" else "cpu"
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
# NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4
target_dtype = (
torch.get_autocast_dtype(device_type)
if hasattr(torch, "get_autocast_dtype")
else torch.get_autocast_gpu_dtype()
)
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to"
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output, cache_position) # Ignore copy
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
| KyutaiSpeechToTextFlashAttention2 |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 10131,
"end": 10198
} | class ____(Tokenizer):
_tokens = (ARGUMENT,)
| TemplatedKeywordCall |
python | huggingface__transformers | src/transformers/models/chameleon/modeling_chameleon.py | {
"start": 26544,
"end": 28247
} | class ____(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states):
residual = hidden_states
hidden_states = self.norm(hidden_states)
query_states = self.q(hidden_states)
key_states = self.k(hidden_states)
value_states = self.v(hidden_states)
# compute attention
batch_size, channels, height, width = query_states.shape
query_states = query_states.reshape(batch_size, channels, height * width).permute(0, 2, 1)
key_states = key_states.reshape(batch_size, channels, height * width)
attn_weights = torch.bmm(query_states, key_states)
attn_weights = attn_weights * (int(channels) ** (-0.5))
attn_weights = F.softmax(attn_weights, dim=2)
# attend to values
value_states = value_states.reshape(batch_size, channels, height * width)
attn_weights = attn_weights.permute(0, 2, 1)
attn_output = torch.bmm(value_states, attn_weights).reshape(batch_size, channels, height, width)
attn_output = self.proj_out(attn_output)
return residual + attn_output
| ChameleonVQVAEEncoderAttnBlock |
python | python-attrs__attrs | tests/test_cmp.py | {
"start": 1304,
"end": 8878
} | class ____:
"""
Tests for eq and order related methods.
"""
#########
# eq
#########
@pytest.mark.parametrize(
("cls", "requires_same_type"), cmp_data, ids=cmp_ids
)
def test_equal_same_type(self, cls, requires_same_type):
"""
Equal objects are detected as equal.
"""
assert cls(1) == cls(1)
assert not (cls(1) != cls(1))
@pytest.mark.parametrize(
("cls", "requires_same_type"), cmp_data, ids=cmp_ids
)
def test_unequal_same_type(self, cls, requires_same_type):
"""
Unequal objects of correct type are detected as unequal.
"""
assert cls(1) != cls(2)
assert not (cls(1) == cls(2))
@pytest.mark.parametrize(
("cls", "requires_same_type"), cmp_data, ids=cmp_ids
)
def test_equal_different_type(self, cls, requires_same_type):
"""
Equal values of different types are detected appropriately.
"""
assert (cls(1) == cls(1.0)) == (not requires_same_type)
assert not (cls(1) != cls(1.0)) == (not requires_same_type)
#########
# lt
#########
@pytest.mark.parametrize(
("cls", "requires_same_type"), eq_data, ids=eq_ids
)
def test_lt_unorderable(self, cls, requires_same_type):
"""
TypeError is raised if class does not implement __lt__.
"""
with pytest.raises(TypeError):
cls(1) < cls(2)
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_lt_same_type(self, cls, requires_same_type):
"""
Less-than objects are detected appropriately.
"""
assert cls(1) < cls(2)
assert not (cls(2) < cls(1))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_not_lt_same_type(self, cls, requires_same_type):
"""
Not less-than objects are detected appropriately.
"""
assert cls(2) >= cls(1)
assert not (cls(1) >= cls(2))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_lt_different_type(self, cls, requires_same_type):
"""
Less-than values of different types are detected appropriately.
"""
if requires_same_type:
# Unlike __eq__, NotImplemented will cause an exception to be
# raised from __lt__.
with pytest.raises(TypeError):
cls(1) < cls(2.0)
else:
assert cls(1) < cls(2.0)
assert not (cls(2) < cls(1.0))
#########
# le
#########
@pytest.mark.parametrize(
("cls", "requires_same_type"), eq_data, ids=eq_ids
)
def test_le_unorderable(self, cls, requires_same_type):
"""
TypeError is raised if class does not implement __le__.
"""
with pytest.raises(TypeError):
cls(1) <= cls(2)
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_le_same_type(self, cls, requires_same_type):
"""
Less-than-or-equal objects are detected appropriately.
"""
assert cls(1) <= cls(1)
assert cls(1) <= cls(2)
assert not (cls(2) <= cls(1))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_not_le_same_type(self, cls, requires_same_type):
"""
Not less-than-or-equal objects are detected appropriately.
"""
assert cls(2) > cls(1)
assert not (cls(1) > cls(1))
assert not (cls(1) > cls(2))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_le_different_type(self, cls, requires_same_type):
"""
Less-than-or-equal values of diff. types are detected appropriately.
"""
if requires_same_type:
# Unlike __eq__, NotImplemented will cause an exception to be
# raised from __le__.
with pytest.raises(TypeError):
cls(1) <= cls(2.0)
else:
assert cls(1) <= cls(2.0)
assert cls(1) <= cls(1.0)
assert not (cls(2) <= cls(1.0))
#########
# gt
#########
@pytest.mark.parametrize(
("cls", "requires_same_type"), eq_data, ids=eq_ids
)
def test_gt_unorderable(self, cls, requires_same_type):
"""
TypeError is raised if class does not implement __gt__.
"""
with pytest.raises(TypeError):
cls(2) > cls(1)
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_gt_same_type(self, cls, requires_same_type):
"""
Greater-than objects are detected appropriately.
"""
assert cls(2) > cls(1)
assert not (cls(1) > cls(2))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_not_gt_same_type(self, cls, requires_same_type):
"""
Not greater-than objects are detected appropriately.
"""
assert cls(1) <= cls(2)
assert not (cls(2) <= cls(1))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_gt_different_type(self, cls, requires_same_type):
"""
Greater-than values of different types are detected appropriately.
"""
if requires_same_type:
# Unlike __eq__, NotImplemented will cause an exception to be
# raised from __gt__.
with pytest.raises(TypeError):
cls(2) > cls(1.0)
else:
assert cls(2) > cls(1.0)
assert not (cls(1) > cls(2.0))
#########
# ge
#########
@pytest.mark.parametrize(
("cls", "requires_same_type"), eq_data, ids=eq_ids
)
def test_ge_unorderable(self, cls, requires_same_type):
"""
TypeError is raised if class does not implement __ge__.
"""
with pytest.raises(TypeError):
cls(2) >= cls(1)
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_ge_same_type(self, cls, requires_same_type):
"""
Greater-than-or-equal objects are detected appropriately.
"""
assert cls(1) >= cls(1)
assert cls(2) >= cls(1)
assert not (cls(1) >= cls(2))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_not_ge_same_type(self, cls, requires_same_type):
"""
Not greater-than-or-equal objects are detected appropriately.
"""
assert cls(1) < cls(2)
assert not (cls(1) < cls(1))
assert not (cls(2) < cls(1))
@pytest.mark.parametrize(
("cls", "requires_same_type"), order_data, ids=order_ids
)
def test_ge_different_type(self, cls, requires_same_type):
"""
Greater-than-or-equal values of diff. types are detected appropriately.
"""
if requires_same_type:
# Unlike __eq__, NotImplemented will cause an exception to be
# raised from __ge__.
with pytest.raises(TypeError):
cls(2) >= cls(1.0)
else:
assert cls(2) >= cls(2.0)
assert cls(2) >= cls(1.0)
assert not (cls(1) >= cls(2.0))
| TestEqOrder |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/type_params.py | {
"start": 226,
"end": 616
} | class ____[
T
] ( # trailing arguments open parenthesis comment
# leading argument comment
A # trailing argument comment
# trailing argument own line comment
): # fmt: skip
pass
def test [
# comment
A,
# another
B,
] (): # fmt: skip
...
def test [
# comment
A,
# another
B,
] () -> str: # fmt: skip
...
| TestTrailingComment4 |
python | ray-project__ray | python/ray/_private/state_api_test_utils.py | {
"start": 953,
"end": 1079
} | class ____:
api: Callable
verify_cb: Callable
kwargs: Dict = field(default_factory=dict)
@dataclass
| StateAPICallSpec |
python | google__flatbuffers | tests/service_test_grpc.fb.py | {
"start": 1364,
"end": 3331
} | class ____(object):
"""Interface exported by the server."""
def Hello(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamClient(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamServer(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stream(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HelloServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Hello': grpc.unary_unary_rpc_method_handler(
servicer.Hello,
request_deserializer=HelloRequest.GetRootAs,
response_serializer=_serialize_to_bytes,
),
'StreamClient': grpc.stream_unary_rpc_method_handler(
servicer.StreamClient,
request_deserializer=HelloRequest.GetRootAs,
response_serializer=_serialize_to_bytes,
),
'StreamServer': grpc.unary_stream_rpc_method_handler(
servicer.StreamServer,
request_deserializer=HelloRequest.GetRootAs,
response_serializer=_serialize_to_bytes,
),
'Stream': grpc.stream_stream_rpc_method_handler(
servicer.Stream,
request_deserializer=HelloRequest.GetRootAs,
response_serializer=_serialize_to_bytes,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'example.HelloService', rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| HelloServiceServicer |
python | huggingface__transformers | src/transformers/models/parakeet/processing_parakeet.py | {
"start": 1321,
"end": 3175
} | class ____(ProcessorMixin):
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
def __call__(
self,
audio: AudioInput,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], None] = None,
sampling_rate: Optional[int] = None,
**kwargs: Unpack[ParakeetProcessorKwargs],
):
audio = make_list_of_audio(audio)
output_kwargs = self._merge_kwargs(
ParakeetProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if sampling_rate is None:
logger.warning_once(
f"You've provided audio without specifying the sampling rate. It will be assumed to be {output_kwargs['audio_kwargs']['sampling_rate']}, which can result in silent errors."
)
elif sampling_rate != output_kwargs["audio_kwargs"]["sampling_rate"]:
raise ValueError(
f"The sampling rate of the audio ({sampling_rate}) does not match the sampling rate of the processor ({output_kwargs['audio_kwargs']['sampling_rate']}). Please provide resampled the audio to the expected sampling rate."
)
if audio is not None:
inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
if text is not None:
encodings = self.tokenizer(text, **output_kwargs["text_kwargs"])
if text is None:
return inputs
else:
inputs["labels"] = encodings["input_ids"]
return inputs
@property
def model_input_names(self):
feature_extractor_input_names = self.feature_extractor.model_input_names
return feature_extractor_input_names + ["labels"]
__all__ = ["ParakeetProcessor"]
| ParakeetProcessor |
python | kamyu104__LeetCode-Solutions | Python/closest-prime-numbers-in-range.py | {
"start": 232,
"end": 1843
} | class ____(object):
def __init__(self, N, build_fn, query_fn):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def query(self, L, R):
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
# number theory, segment tree
def linear_sieve_of_eratosthenes(n):
primes = []
spf = [-1]*(n+1) # the smallest prime factor
for i in xrange(2, n+1):
if spf[i] == -1:
spf[i] = i
primes.append(i)
for p in primes:
if i*p > n or p > spf[i]:
break
spf[i*p] = p
return primes # len(primes) = O(n/(logn-1)), reference: https://math.stackexchange.com/questions/264544/how-to-find-number-of-prime-numbers-up-to-to-n
MAX_N = 10**6
PRIMES = linear_sieve_of_eratosthenes(MAX_N)
ST = SegmentTree(len(PRIMES)-1,
build_fn=lambda i: [PRIMES[i+1]-PRIMES[i], [PRIMES[i], PRIMES[i+1]]],
query_fn=lambda x, y: y if x is None else x if y is None else min(x, y))
| SegmentTree |
python | doocs__leetcode | lcof2/剑指 Offer II 024. 反转链表/Solution.py | {
"start": 136,
"end": 357
} | class ____:
def reverseList(self, head: ListNode) -> ListNode:
pre, p = None, head
while p:
q = p.next
p.next = pre
pre = p
p = q
return pre
| Solution |
python | getsentry__sentry | src/sentry/pipeline/types.py | {
"start": 236,
"end": 472
} | class ____[M: Model, S: PipelineSessionStore]:
"""Initial pipeline attributes from a request."""
state: S
provider_model: M | None
organization: RpcOrganization | None
provider_key: str
@dataclass
| PipelineRequestState |
python | pytorch__pytorch | torch/optim/optimizer.py | {
"start": 1526,
"end": 13661
} | class ____:
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self) -> str:
return "<required parameter>"
required = _RequiredParameter()
def _use_grad_for_differentiable(func: Callable[_P, _T]) -> Callable[_P, _T]:
def _use_grad(*args: _P.args, **kwargs: _P.kwargs) -> _T:
import torch._dynamo
# pyrefly: ignore [unsupported-operation]
self = cast(Optimizer, args[0]) # assume first positional arg is `self`
prev_grad = torch.is_grad_enabled()
try:
# Note on graph break below:
# we need to graph break to ensure that aot respects the no_grad annotation.
# This is important for perf because without this, functionalization will generate an epilogue
# which updates the mutated parameters of the optimizer which is *not* visible to inductor, as a result,
# inductor will allocate for every parameter in the model, which is horrible.
# With this, aot correctly sees that this is an inference graph, and functionalization will generate
# an epilogue which is appended to the graph, which *is* visible to inductor, as a result, inductor sees that
# step is in place and is able to avoid the extra allocation.
# In the future, we will either 1) continue to graph break on backward, so this graph break does not matter
# or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this
# graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled.
# see https://github.com/pytorch/pytorch/issues/104053
torch.set_grad_enabled(self.defaults["differentiable"])
torch._dynamo.graph_break()
ret = func(*args, **kwargs)
finally:
torch._dynamo.graph_break()
torch.set_grad_enabled(prev_grad)
return ret
functools.update_wrapper(_use_grad, func)
return _use_grad
def _get_value(x):
# item is significantly faster than a cpu tensor in eager mode
if not torch.jit.is_scripting() and torch.compiler.is_compiling():
return x
else:
return x.item() if isinstance(x, torch.Tensor) else x
def _stack_if_compiling(x):
if not torch.jit.is_scripting() and torch.compiler.is_compiling():
return torch.stack(x)
else:
return x
def _disable_dynamo_if_unsupported(
single_tensor_fn: Optional[Callable[..., object]] = None,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
# workaround for torchscript BC
# it requires all called functions to be in the
# global environment at the site at which the
# maybe_fallback closure is created
if single_tensor_fn:
globals()[single_tensor_fn.__name__] = single_tensor_fn
def wrapper(func: Callable[_P, _T]) -> Callable[_P, _T]:
import inspect
disabled_func = torch._disable_dynamo(func)
ps = inspect.signature(func).parameters
has_state_steps = True
try:
state_steps_ind = list(ps.keys()).index("state_steps")
except ValueError:
has_state_steps = False
# Today, there are cases where we stack state steps
# and pass them as the value arg of foreach ops.
# Having state steps on cuda as the value arg is not supported in eager,
# but this only occurs in the rare case that the user explicitly deletes
# the capturable flag. If capturable=True, this is not a problem.
@functools.wraps(func)
def maybe_fallback(*args: _P.args, **kwargs: _P.kwargs):
if torch.compiler.is_compiling() and (
not kwargs.get("capturable", False)
and has_state_steps
# pyrefly: ignore [unsupported-operation]
and (arg := args[state_steps_ind])
and isinstance(arg, Sequence)
and arg[0].is_cuda
or (
"state_steps" in kwargs
# pyrefly: ignore [unsupported-operation]
and (kwarg := kwargs["state_steps"])
and isinstance(kwarg, Sequence)
and kwarg[0].is_cuda
)
):
return disabled_func(*args, **kwargs)
else:
return func(*args, **kwargs)
return maybe_fallback
return wrapper
# For any optimizer with a faster implementation, we attempt to default to the
# fastest + stablest whenever possible. For foreach, the requirements are to have
# native params all on CUDA. For fused, there's currently the additional requirement
# that the tensors' dtypes must be floating point. Neither alternative supports
# torch.jit.script nor differentiable, so we fall back to the single tensor
# implementation in those cases.
def _default_to_fused_or_foreach(
params: list[torch.Tensor], differentiable: bool, use_fused: bool = False
) -> tuple[bool, bool]:
if torch.jit.is_scripting() or differentiable:
return False, False
fused_supported_devices = _get_fused_kernels_supported_devices()
foreach_supported_devices = _get_foreach_kernels_supported_devices()
fused = use_fused and all(
p is None
or (
type(p) in _foreach_supported_types
and p.device.type in fused_supported_devices
and torch.is_floating_point(p)
)
for p in params
)
foreach = not fused and all(
p is None
or (
type(p) in _foreach_supported_types
and p.device.type in foreach_supported_devices
)
for p in params
)
return fused, foreach
def _device_dtype_check_for_fused(
p: torch.Tensor, cuda_unsupported: bool = False
) -> None:
fused_supported_devices = _get_fused_kernels_supported_devices()
if cuda_unsupported:
fused_supported_devices.remove("cuda")
if not (p.device.type in fused_supported_devices and torch.is_floating_point(p)):
raise RuntimeError(
"`fused=True` requires all the params to be floating point Tensors of "
f"supported devices: {fused_supported_devices} but {p.dtype} and {p.device.type}"
)
def _view_as_real(params, *state_and_grads) -> None:
for i, p in enumerate(params):
if torch.is_complex(p):
params[i] = torch.view_as_real(params[i])
for s in state_and_grads:
s[i] = torch.view_as_real(s[i])
def _get_scalar_dtype(is_fused=None):
if is_fused:
return torch.float32
return (
torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32
)
def _get_capturable_supported_devices(supports_xla: bool = True) -> list[str]:
r"""Return the device type list that supports capturable optimizer."""
capturable_supported_devices = ["cuda", "xpu", "hpu"]
if not torch.jit.is_scripting():
capturable_supported_devices.append(torch._C._get_privateuse1_backend_name())
if supports_xla:
capturable_supported_devices.append("xla")
return capturable_supported_devices
def _to_scalar(x: Union[float, torch.Tensor]):
r"""This function converts a hyperparameter to a 0-dimension (scalar) tensor
if it is a nonzero-dimensions 1-element tensor. If it is not a tensor, it is
kept as is.
Args:
x (float or Tensor): A hyperparameter of the optimizer.
If it is Tensor, it is needed to be 1-element.
Returns:
float or Tensor:
a scalar tensor if x is Tensor otherwise Python scalar (float) value.
"""
if isinstance(x, torch.Tensor) and x.dim() != 0:
return x.squeeze()
else:
return x
# Common doc strings among optimizers
_params_doc = r"""params (iterable): iterable of parameters or named_parameters to optimize
or iterable of dicts defining parameter groups. When using named_parameters,
all parameters in all groups should be named"""
_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer
is used. If unspecified by the user (so foreach is None), we will try to use
foreach over the for-loop implementation on CUDA, since it is usually
significantly more performant. Note that the foreach implementation uses
~ sizeof(params) more peak memory than the for-loop version due to the intermediates
being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer
parameters through the optimizer at a time or switch this flag to False (default: None)"""
_fused_doc = r"""fused (bool, optional): whether the fused implementation is used.
Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16`
are supported. (default: None)
.. note:: The foreach and fused implementations are typically faster than the for-loop,
single-tensor implementation, with fused being theoretically fastest with both
vertical and horizontal fusion. As such, if the user has not specified either
flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach
implementation when the tensors are all on CUDA. Why not fused? Since the fused
implementation is relatively new, we want to give it sufficient bake-in time.
To specify fused, pass True for fused. To force running the for-loop
implementation, pass False for either foreach or fused. """
_capturable_doc = r"""capturable (bool, optional): whether this instance is safe to
capture in a graph, whether for CUDA graphs or for torch.compile support.
Tensors are only capturable when on supported :ref:`accelerators<accelerators>`.
Passing True can impair ungraphed performance, so if you don't intend to graph
capture this instance, leave it False (default: False)"""
_differentiable_doc = r"""differentiable (bool, optional): whether autograd should
occur through the optimizer step in training. Otherwise, the step()
function runs in a torch.no_grad() context. Setting to True can impair
performance, so leave it False if you don't intend to run autograd
through this instance (default: False)"""
_maximize_doc = r"""maximize (bool, optional): maximize the objective with respect to the
params, instead of minimizing (default: False)"""
def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle:
r"""Register a pre hook common to all optimizers.
The hook should have the following signature::
hook(optimizer, args, kwargs) -> None or modified args and kwargs
Args:
hook (Callable): A user defined hook which is registered on all optimizers.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(_global_optimizer_pre_hooks)
_global_optimizer_pre_hooks[handle.id] = hook
return handle
def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle:
r"""Register a post hook common to all optimizers.
The hook should have the following signature::
hook(optimizer, args, kwargs) -> None
Args:
hook (Callable): A user defined hook which is registered on all optimizers.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
a handle that can be used to remove the added hook by calling
``handle.remove()``
"""
handle = hooks.RemovableHandle(_global_optimizer_post_hooks)
_global_optimizer_post_hooks[handle.id] = hook
return handle
ParamsT: TypeAlias = Union[
Iterable[torch.Tensor], Iterable[dict[str, Any]], Iterable[tuple[str, torch.Tensor]]
]
R = TypeVar("R")
T = TypeVar("T")
| _RequiredParameter |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 50068,
"end": 54280
} | class ____(util.MdCase):
"""Test relaxed header cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences', 'attr_list']
extension_configs = {
'pymdownx.superfences': {
'relaxed_headers': True,
'custom_fences': [
{
'name': 'test',
'class': 'test',
'format': custom_format,
'validator': custom_validator_except
},
{
'name': 'test2',
'class': 'test',
'format': custom_format,
'validator': custom_validator_exploder
}
]
}
}
def test_custom_fail_exception(self):
"""Test custom fences forced exception."""
with self.assertRaises(SuperFencesException):
self.check_markdown(
r'''
```test [1]
test
```
''',
'',
True
)
def test_custom_fail_exception_relaxed(self):
"""Test custom fences relaxed forced exception."""
self.check_markdown(
r'''
```test2 [1]
test
```
''',
'''
<div class="highlight"><pre><span></span><code>test
</code></pre></div>
''',
True
)
def test_bad_lang(self):
"""Test bad language."""
self.check_markdown(
r'''
```bad
test
```
''',
'''
<div class="highlight"><pre><span></span><code>test
</code></pre></div>
''',
True
)
def test_bad_option(self):
"""Test bad options."""
self.check_markdown(
r'''
```pycon bad="My title"
>>> import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_bad_attribute(self):
"""Test bad attribute."""
self.check_markdown(
r'''
```pycon {.class <nonsense>!!!}
>>> import test
```
''',
r'''
<div class="class highlight"><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_bad_attribute_full(self):
"""Test bad attribute full."""
self.check_markdown(
r'''
```{.pycon .class <nonsense>!!!}
>>> import test
```
''',
r'''
<div class="class highlight"><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_bad_header(self):
"""Test bad header."""
self.check_markdown(
r'''
```pycon [nonsense]
>>> import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><span class="gp">>>> </span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
def test_nonsense(self):
"""Test only nonsense."""
self.check_markdown(
r'''
```[nonsense]
>>> import test
```
''',
r'''
<div class="highlight"><pre><span></span><code>>>> import test
</code></pre></div>
''',
True
)
| TestHighlightRelaxedHeaders |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/mapping/base.py | {
"start": 116,
"end": 660
} | class ____(abc.ABC):
from_key: str = ""
to_key: str = ""
applied_on_groupby: bool = False
def __init__(self):
# This exists to satisfy mypy, which complains otherwise
self.map: dict[Any, Any] = {}
def __hash__(self):
return hash((self.from_key, self.to_key))
@abc.abstractmethod
def forward(self, projects: Sequence[Project], value: Any) -> Any:
return value
@abc.abstractmethod
def backward(self, projects: Sequence[Project], value: Any) -> Any:
return value
| Mapper |
python | django__django | tests/delete_regress/models.py | {
"start": 3793,
"end": 4054
} | class ____(models.Model):
best_toy = models.ForeignKey(
Toy, default=get_best_toy, on_delete=models.SET_DEFAULT, related_name="toys"
)
worst_toy = models.ForeignKey(
Toy, models.SET(get_worst_toy), related_name="bad_toys"
)
| Collector |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 74821,
"end": 93635
} | class ____(tuple):
__slots__ = ()
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if isinstance(n, dict): # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
lens = len(s)
lenn = len(n)
if lens == lenn:
n = [1]*lenn + [lenn, lenn]
return _MultisetHistogram(n)
m = dict(zip(s, range(lens)))
d = dict(zip(range(lens), (0,)*lens))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``:
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
See Also
========
sympy.utilities.iterables.multiset_permutations
References
==========
.. [1] https://en.wikipedia.org/wiki/Permutation
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
@cacheit
def _nP(n, k=None, replacement=False):
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
of the product of AOPs (all-one polynomials) or order given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
https://tinyurl.com/cep849r, but in a refactored form.
"""
n = list(n)
ord = sum(n)
need = (ord + 2)//2
rv = [1]*(n.pop() + 1)
rv.extend((0,) * (need - len(rv)))
rv = rv[:need]
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i, r in enumerate(rv):
d[i] = r
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``:
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
then the total of all combinations of length 0 through ``k`` is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
See Also
========
sympy.utilities.iterables.multiset_combinations
References
==========
.. [1] https://en.wikipedia.org/wiki/Combination
.. [2] https://tinyurl.com/cep849r
"""
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
if k < 0:
raise ValueError("k cannot be negative")
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
def _eval_stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
# some special values
if n == k:
return S.One
elif k == n - 1:
return binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*binomial(n, 3)/4
elif k == n - 3:
return binomial(n, 2)*binomial(n, 4)
return _stirling1(n, k)
@cacheit
def _stirling1(n, k):
row = [0, 1]+[0]*(k-1) # for n = 1
for i in range(2, n+1):
for j in range(min(k,i), 0, -1):
row[j] = (i-1) * row[j] + row[j-1]
return Integer(row[k])
def _eval_stirling2(n, k):
    """Evaluate the Stirling number of the second kind.

    Short-circuits the trivial and closed-form cases and defers to the
    cached recurrence ``_stirling2`` otherwise.
    """
    if n == k == 0:
        return S.One
    if n == 0 or k == 0:
        return S.Zero
    # Known closed forms avoid the full recurrence.
    if k == n:
        return S.One
    if k == n - 1:
        return binomial(n, 2)
    if k == 1:
        return S.One
    if k == 2:
        # Number of ways to split n distinct items into two nonempty parts.
        return Integer(2**(n - 1) - 1)
    return _stirling2(n, k)
@cacheit
def _stirling2(n, k):
    # Single-row dynamic program over the recurrence
    # S2(i, j) = j*S2(i - 1, j) + S2(i - 1, j - 1), seeded with row n = 1.
    current = [0, 1] + [0] * (k - 1)
    for i in range(2, n + 1):
        # Descend in j so the previous row's value at j - 1 is still intact.
        for j in range(min(k, i), 0, -1):
            current[j] = j * current[j] + current[j - 1]
    return Integer(current[k])
def stirling(n, k, d=None, kind=2, signed=False):
    r"""Return Stirling number $S(n, k)$ of the first or second (default) kind.

    The sum of all Stirling numbers of the second kind for $k = 1$
    through $n$ is ``bell(n)``. The recurrence relationship for these numbers
    is:

    .. math :: {0 \brace 0} = 1; {n \brace 0} = {0 \brace k} = 0;

    .. math :: {{n+1} \brace k} = j {n \brace k} + {n \brace {k-1}}

    where $j$ is:
        $n$ for Stirling numbers of the first kind,
        $-n$ for signed Stirling numbers of the first kind,
        $k$ for Stirling numbers of the second kind.

    The first kind of Stirling number counts the number of permutations of
    ``n`` distinct items that have ``k`` cycles; the second kind counts the
    ways in which ``n`` distinct items can be partitioned into ``k`` parts.

    If ``d`` is given, the "reduced Stirling number of the second kind" is
    returned: $S^{d}(n, k) = S(n - d + 1, k - d + 1)$ with $n \ge k \ge d$.
    (This counts the ways to partition $n$ consecutive integers into $k$
    groups with no pairwise difference less than $d$. See example below.)

    To obtain the signed Stirling numbers of the first kind, use keyword
    ``signed=True``. Using this keyword automatically sets ``kind`` to 1.

    Examples
    ========

    >>> from sympy.functions.combinatorial.numbers import stirling, bell
    >>> from sympy.combinatorics import Permutation
    >>> from sympy.utilities.iterables import multiset_partitions, permutations

    First kind (unsigned by default):

    >>> [stirling(6, i, kind=1) for i in range(7)]
    [0, 120, 274, 225, 85, 15, 1]
    >>> perms = list(permutations(range(4)))
    >>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
    [0, 6, 11, 6, 1]
    >>> [stirling(4, i, kind=1) for i in range(5)]
    [0, 6, 11, 6, 1]

    First kind (signed):

    >>> [stirling(4, i, signed=True) for i in range(5)]
    [0, -6, 11, -6, 1]

    Second kind:

    >>> [stirling(10, i) for i in range(12)]
    [0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
    >>> sum(_) == bell(10)
    True
    >>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
    True

    Reduced second kind:

    >>> from sympy import subsets, oo
    >>> def delta(p):
    ...    if len(p) == 1:
    ...        return oo
    ...    return min(abs(i[0] - i[1]) for i in subsets(p, 2))
    >>> parts = multiset_partitions(range(5), 3)
    >>> d = 2
    >>> sum(1 for p in parts if all(delta(i) >= d for i in p))
    7
    >>> stirling(5, 3, 2)
    7

    See Also
    ========
    sympy.utilities.iterables.multiset_partitions

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
    .. [2] https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind

    """
    # TODO: make this a class like bell()
    n = as_int(n)
    k = as_int(k)
    if n < 0:
        raise ValueError('n must be nonnegative')
    if k > n:
        # No way to use more parts/cycles than there are items.
        return S.Zero
    if d:
        # assert k >= d
        # kind is ignored -- only kind=2 is supported
        return _eval_stirling2(n - d + 1, k - d + 1)
    elif signed:
        # kind is ignored -- only kind=1 is supported
        return S.NegativeOne**(n - k)*_eval_stirling1(n, k)
    if kind == 1:
        return _eval_stirling1(n, k)
    elif kind == 2:
        return _eval_stirling2(n, k)
    else:
        # Bug fix: the message previously interpolated ``k`` (a valid
        # argument) instead of the offending ``kind`` value.
        raise ValueError('kind must be 1 or 2, not %s' % kind)
@cacheit
def _nT(n, k):
    """Return the partitions of ``n`` items into ``k`` parts. This
    is used by ``nT`` for the case when ``n`` is an integer.

    Both arguments are plain integers and the return value is a plain
    int (``nT`` wraps it in ``Integer``); results are memoized by
    ``cacheit``.
    """
    # really quick exits
    if k > n or k < 0:
        return 0
    if k in (1, n):
        # one big part, or n singleton parts -- a single partition either way
        return 1
    if k == 0:
        return 0
    # exits that could be done below but this is quicker
    if k == 2:
        # choose the size of the smaller part: 1..n//2
        return n//2
    d = n - k
    if d <= 3:
        return d
    # quick exit
    if 3*k >= n:  # or, equivalently, 2*k >= d
        # all the information needed in this case
        # will be in the cache needed to calculate
        # partition(d), so...
        # update cache
        tot = _partition_rec(d)
        # and correct for values not needed
        # NOTE(review): relies on ``_partition_rec.fetch_item`` exposing the
        # cached values for arguments below ``d - k`` -- confirm against the
        # cacheit implementation if that interface changes.
        if d - k > 0:
            tot -= sum(_partition_rec.fetch_item(slice(d - k)))
        return tot
    # regular exit
    # nT(n, k) = Sum(nT(n - k, m), (m, 1, k));
    # calculate needed nT(i, j) values
    p = [1]*d
    for i in range(2, k + 1):
        for m in range(i + 1, d):
            p[m] += p[m - i]
        # shrink the window of needed values as i grows
        d -= 1
    # if p[0] were appended to the end of p then the last
    # k values of p are the nT(n, j) values for 0 < j < k in reverse
    # order p[-1] = nT(n, 1), p[-2] = nT(n, 2), etc.... Instead of
    # putting the 1 from p[0] there, however, it is simply added to
    # the sum below which is valid for 1 < k <= n//2
    return (1 + sum(p[1 - k:]))
def nT(n, k=None):
    """Return the number of ``k``-sized partitions of ``n`` items.

    Possible values for ``n``:

        integer - ``n`` identical items
        sequence - converted to a multiset internally
        multiset - {element: multiplicity}

    Note: the convention for ``nT`` is different than that of ``nC`` and
    ``nP`` in that
    here an integer indicates ``n`` *identical* items instead of a set of
    length ``n``; this is in keeping with the ``partitions`` function which
    treats its integer-``n`` input like a list of ``n`` 1s. One can use
    ``range(n)`` for ``n`` to indicate ``n`` distinct items.

    If ``k`` is None then the total number of ways to partition the elements
    represented in ``n`` will be returned.

    Examples
    ========

    >>> from sympy.functions.combinatorial.numbers import nT

    Partitions of the given multiset:

    >>> [nT('aabbc', i) for i in range(1, 7)]
    [1, 8, 11, 5, 1, 0]
    >>> nT('aabbc') == sum(_)
    True

    >>> [nT("mississippi", i) for i in range(1, 12)]
    [1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]

    Partitions when all items are identical:

    >>> [nT(5, i) for i in range(1, 6)]
    [1, 2, 2, 1, 1]
    >>> nT('1'*5) == sum(_)
    True

    When all items are different:

    >>> [nT(range(5), i) for i in range(1, 6)]
    [1, 15, 25, 10, 1]
    >>> nT(range(5)) == sum(_)
    True

    Partitions of an integer expressed as a sum of positive integers:

    >>> from sympy import partition
    >>> partition(4)
    5
    >>> nT(4, 1) + nT(4, 2) + nT(4, 3) + nT(4, 4)
    5
    >>> nT('1'*4)
    5

    See Also
    ========
    sympy.utilities.iterables.partitions
    sympy.utilities.iterables.multiset_partitions
    sympy.functions.combinatorial.numbers.partition

    References
    ==========

    .. [1] https://web.archive.org/web/20210507012732/https://teaching.csse.uwa.edu.au/units/CITS7209/partition.pdf

    """
    if isinstance(n, SYMPY_INTS):
        # n identical items
        if k is None:
            return partition(n)
        if isinstance(k, SYMPY_INTS):
            n = as_int(n)
            k = as_int(k)
            return Integer(_nT(n, k))
    if not isinstance(n, _MultisetHistogram):
        try:
            # if n contains hashable items there is some
            # quick handling that can be done
            u = len(set(n))
            if u <= 1:
                # all items identical -- same as the integer case
                return nT(len(n), k)
            elif u == len(n):
                # all items distinct -- replace with a canonical set of
                # distinct items before building the histogram
                n = range(u)
            # deliberate: fall into the except clause so the histogram is
            # built for the distinct-items case as well
            raise TypeError
        except TypeError:
            n = _multiset_histogram(n)
    N = n[_N]
    if k is None and N == 1:
        return 1
    if k in (1, N):
        return 1
    if k == 2 or N == 2 and k is None:
        # A 2-part split is determined by one of its parts: sum the number
        # of sub-multisets of sizes 1..N//2. When N is even, each
        # equal-size split arises from both of its halves, so half of the
        # size-N/2 count is removed.
        m, r = divmod(N, 2)
        rv = sum(nC(n, i) for i in range(1, m + 1))
        if not r:
            rv -= nC(n, m)//2
        if k is None:
            rv += 1  # for k == 1
        return rv
    if N == n[_ITEMS]:
        # all distinct
        if k is None:
            return bell(N)
        return stirling(N, k)
    m = MultisetPartitionTraverser()
    if k is None:
        return m.count_partitions(n[_M])
    # MultisetPartitionTraverser does not have a range-limited count
    # method, so need to enumerate and count
    tot = 0
    for discard in m.enum_range(n[_M], k-1, k):
        tot += 1
    return tot
#-----------------------------------------------------------------------------#
# #
# Motzkin numbers #
# #
#-----------------------------------------------------------------------------#
| _MultisetHistogram |
python | Lightning-AI__lightning | tests/tests_pytorch/strategies/test_single_device.py | {
"start": 3073,
"end": 4681
} | class ____(BoringModel):
def train_dataloader(self):
raise NotImplementedError
def val_dataloader(self):
raise NotImplementedError
def test_dataloader(self):
raise NotImplementedError
def predict_dataloader(self):
raise NotImplementedError
_loader = DataLoader(RandomDataset(32, 64))
_loader_no_len = CustomNotImplementedErrorDataloader(_loader)
@pytest.mark.parametrize(
("keyword", "value"),
[
("train_dataloaders", _loader_no_len),
("val_dataloaders", _loader_no_len),
("test_dataloaders", _loader_no_len),
("predict_dataloaders", _loader_no_len),
("val_dataloaders", [_loader, _loader_no_len]),
],
)
def test_process_dataloader_gets_called_as_expected(keyword, value, monkeypatch):
trainer = Trainer()
model = BoringModelNoDataloaders()
strategy = SingleDeviceStrategy(accelerator=Mock())
strategy.connect(model)
trainer._accelerator_connector.strategy = strategy
process_dataloader_mock = MagicMock()
monkeypatch.setattr(strategy, "process_dataloader", process_dataloader_mock)
if "train" in keyword:
fn = trainer.fit_loop.setup_data
elif "val" in keyword:
fn = trainer.validate_loop.setup_data
elif "test" in keyword:
fn = trainer.test_loop.setup_data
else:
fn = trainer.predict_loop.setup_data
trainer._data_connector.attach_dataloaders(model, **{keyword: value})
fn()
expected = len(value) if isinstance(value, list) else 1
assert process_dataloader_mock.call_count == expected
| BoringModelNoDataloaders |
python | django__django | tests/admin_widgets/tests.py | {
"start": 10521,
"end": 10892
} | class ____(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse("admin:admin_widgets_car_changelist"))
self.assertContains(response, "/auth/user/add/")
@override_settings(ROOT_URLCONF="admin_widgets.urls")
| AdminForeignKeyWidgetChangeList |
python | getsentry__sentry | tests/sentry/sentry_apps/api/serializers/test_sentry_app.py | {
"start": 3783,
"end": 4467
} | class ____(TestCase):
def test_hidden_client_secret(self) -> None:
sentry_app = self.create_sentry_app(
name="Tesla App", organization=self.organization, published=True, scopes=("org:write",)
)
acc = access.from_user(self.user, self.organization)
result = serialize(sentry_app, self.user, SentryAppSerializer(), access=acc)
assert result["clientSecret"] is not None
now = datetime.now()
with freeze_time(now + timedelta(minutes=10)):
result = serialize(sentry_app, self.user, SentryAppSerializer(), access=acc)
assert result["clientSecret"] is None
| SentryAppHiddenClientSecretSerializerTest |
python | spack__spack | lib/spack/spack/llnl/util/argparsewriter.py | {
"start": 1694,
"end": 6926
} | class ____(argparse.HelpFormatter, abc.ABC):
"""Analyze an argparse ArgumentParser for easy generation of help."""
def __init__(self, prog: str, out: IO = sys.stdout, aliases: bool = False) -> None:
"""Initialize a new ArgparseWriter instance.
Args:
prog: Program name.
out: File object to write to.
aliases: Whether or not to include subparsers for aliases.
"""
super().__init__(prog)
self.level = 0
self.prog = prog
self.out = out
self.aliases = aliases
def parse(self, parser: ArgumentParser, prog: str) -> Command:
"""Parse the parser object and return the relevant components.
Args:
parser: Command parser.
prog: Program name.
Returns:
Information about the command from the parser.
"""
self.parser = parser
split_prog = parser.prog.split(" ")
split_prog[-1] = prog
prog = " ".join(split_prog)
description = parser.description
fmt = parser._get_formatter()
actions = parser._actions
groups = parser._mutually_exclusive_groups
usage = fmt._format_usage(None, actions, groups, "").strip()
# Go through actions and split them into optionals, positionals, and subcommands
optionals = []
positionals = []
subcommands = []
for action in actions:
if action.option_strings:
flags = action.option_strings
dest_flags = fmt._format_action_invocation(action)
nargs = action.nargs
help = (
self._expand_help(action)
if action.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
if action.choices is not None:
dest = [str(choice) for choice in action.choices]
else:
dest = [action.dest]
optionals.append((flags, dest, dest_flags, nargs, help))
elif isinstance(action, argparse._SubParsersAction):
for subaction in action._choices_actions:
subparser = action._name_parser_map[subaction.dest]
help = (
self._expand_help(subaction)
if subaction.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
subcommands.append((subparser, subaction.dest, help))
# Look for aliases of the form 'name (alias, ...)'
if self.aliases and isinstance(subaction.metavar, str):
match = re.match(r"(.*) \((.*)\)", subaction.metavar)
if match:
aliases = match.group(2).split(", ")
for alias in aliases:
subparser = action._name_parser_map[alias]
help = (
self._expand_help(subaction)
if subaction.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
subcommands.append((subparser, alias, help))
else:
args = fmt._format_action_invocation(action)
help = (
self._expand_help(action)
if action.help and action.help != argparse.SUPPRESS
else ""
)
help = help.split("\n")[0]
positionals.append((args, action.choices, action.nargs, help))
return Command(prog, description, usage, positionals, optionals, subcommands)
@abc.abstractmethod
def format(self, cmd: Command) -> str:
"""Return the string representation of a single node in the parser tree.
Override this in subclasses to define how each subcommand should be displayed.
Args:
cmd: Parsed information about a command or subcommand.
Returns:
String representation of this subcommand.
"""
def _write(self, parser: ArgumentParser, prog: str, level: int = 0) -> None:
"""Recursively write a parser.
Args:
parser: Command parser.
prog: Program name.
level: Current level.
"""
self.level = level
cmd = self.parse(parser, prog)
self.out.write(self.format(cmd))
for subparser, prog, help in cmd.subcommands:
self._write(subparser, prog, level=level + 1)
def write(self, parser: ArgumentParser) -> None:
"""Write out details about an ArgumentParser.
Args:
parser: Command parser.
"""
try:
self._write(parser, self.prog)
except BrokenPipeError:
# Swallow pipe errors
pass
_rst_levels = ["=", "-", "^", "~", ":", "`"]
| ArgparseWriter |
python | wandb__wandb | wandb/filesync/stats.py | {
"start": 225,
"end": 322
} | class ____(NamedTuple):
uploaded_bytes: int
total_bytes: int
deduped_bytes: int
| Summary |
python | getsentry__sentry | src/sentry/snuba/models.py | {
"start": 1210,
"end": 1748
} | class ____(Enum):
UNKNOWN = 0
NONE = 1
CLIENT_AND_SERVER_WEIGHTED = 2
SERVER_WEIGHTED = 3
@classmethod
def as_choices(cls):
return tuple((mode.value, mode.name.lower()) for mode in cls)
@classmethod
def as_text_choices(cls):
return tuple((mode.name.lower(), mode.value) for mode in cls)
@classmethod
def from_str(cls, name: str):
for mode in cls:
if mode.name.lower() == name:
return mode
return None
@region_silo_model
| ExtrapolationMode |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 785,
"end": 857
} | class ____(SignupWebAuthnForm, inputs.Input):
pass
| CreateWebAuthnInput |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 2034,
"end": 2309
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"COMPLETED",
"IN_PROGRESS",
"PENDING",
"QUEUED",
"REQUESTED",
"WAITING",
)
| CheckStatusState |
python | crytic__slither | slither/__main__.py | {
"start": 23127,
"end": 23561
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self,
parser: Any,
args: Any,
values: Optional[Union[str, Sequence[Any]]],
option_string: Any = None,
) -> None:
detectors, printers = get_detectors_and_printers()
assert isinstance(values, str)
output_to_markdown(detectors, printers, values)
parser.exit()
| OutputMarkdown |
python | falconry__falcon | falcon/errors.py | {
"start": 45520,
"end": 48792
} | class ____(HTTPError):
"""416 Range Not Satisfiable.
None of the ranges in the request's Range header field overlap the
current extent of the selected resource or that the set of ranges
requested has been rejected due to invalid ranges or an excessive
request of small or overlapping ranges.
For byte ranges, failing to overlap the current extent means that
the first-byte-pos of all of the byte-range-spec values were greater
than the current length of the selected representation. When this
status code is generated in response to a byte-range request, the
sender SHOULD generate a Content-Range header field specifying the
current length of the selected representation.
(See also: RFC 7233, Section 4.4)
`resource_length` is the only positional argument allowed,
the other arguments are defined as keyword-only.
Args:
resource_length: The maximum value for the last-byte-pos of a range
request. Used to set the Content-Range header.
Note:
The existing value of the Content-Range in headers will be
overridden by this value
Keyword Args:
title (str): Error title (default '416 Range Not Satisfiable').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
resource_length: int,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
headers = _load_headers(headers)
headers['Content-Range'] = 'bytes */' + str(resource_length)
super().__init__(
status.HTTP_416,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPRangeNotSatisfiable |
python | django__django | tests/migrations/test_migrations_squashed_partially_applied/0004_remove_mymodel1_field_1_mymodel1_field_3_and_more.py | {
"start": 43,
"end": 595
} | class ____(migrations.Migration):
dependencies = [("migrations", "0003_alter_mymodel2_unique_together")]
operations = [
migrations.RemoveField(
model_name="mymodel1",
name="field_1",
),
migrations.AddField(
model_name="mymodel1",
name="field_3",
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name="mymodel1",
name="field_4",
field=models.IntegerField(null=True),
),
]
| Migration |
python | huggingface__transformers | src/transformers/models/xlm_roberta/modeling_xlm_roberta.py | {
"start": 3406,
"end": 6543
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size**-0.5
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
# get all proj
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_values is not None:
# decoder-only xlm_roberta can have a simple dynamic cache for example
current_past_key_values = past_key_values
if isinstance(past_key_values, EncoderDecoderCache):
current_past_key_values = past_key_values.self_attention_cache
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
key_layer, value_layer = current_past_key_values.update(
key_layer,
value_layer,
self.layer_idx,
{"cache_position": cache_position},
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_layer,
key_layer,
value_layer,
attention_mask,
dropout=0.0 if not self.training else self.dropout.p,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return attn_output, attn_weights
| XLMRobertaSelfAttention |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 2162,
"end": 2516
} | class ____[T1 = str, **P4 = [int, T1]]: ...
pc1 = ClassPC()
reveal_type(pc1, expected_text="ClassPC[str, (int, str)]")
pc2 = ClassPC[float]()
reveal_type(pc2, expected_text="ClassPC[float, (int, float)]")
pc3 = ClassPC[float, ...]()
reveal_type(pc3, expected_text="ClassPC[float, ...]")
# This should generate an error because P4 depends on T1.
| ClassPC |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/compiler.py | {
"start": 10642,
"end": 11070
} | class ____(Protocol):
def __call__(
self,
keyname: str,
name: str,
objects: Sequence[Any],
type_: TypeEngine[Any],
) -> None: ...
# integer indexes into ResultColumnsEntry used by cursor.py.
# some profiling showed integer access faster than named tuple
RM_RENDERED_NAME: Literal[0] = 0
RM_NAME: Literal[1] = 1
RM_OBJECTS: Literal[2] = 2
RM_TYPE: Literal[3] = 3
| _ResultMapAppender |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/image.py | {
"start": 403,
"end": 4780
} | class ____(BasePromptTemplate[ImageURL]):
"""Image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'mustache', 'jinja2'."""
def __init__(self, **kwargs: Any) -> None:
"""Create an image prompt template.
Raises:
ValueError: If the input variables contain `'url'`, `'path'`, or
`'detail'`.
"""
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "prompts", "image"]`
"""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=await self.aformat(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the url is not provided.
ValueError: If the url is not a string.
ValueError: If `'path'` is provided in the template or kwargs.
Example:
```python
prompt.format(variable1="foo")
```
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format](
v, **kwargs
)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
if kwargs.get("path") or formatted.get("path"):
msg = (
"Loading images from 'path' has been removed as of 0.3.15 for security "
"reasons. Please specify images by 'url'."
)
raise ValueError(msg)
detail = kwargs.get("detail") or formatted.get("detail")
if not url:
msg = "Must provide url."
raise ValueError(msg)
if not isinstance(url, str):
msg = "url must be a string."
raise ValueError(msg) # noqa: TRY004
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail
return output
async def aformat(self, **kwargs: Any) -> ImageURL:
"""Async format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return await run_in_executor(None, self.format, **kwargs)
def pretty_repr(
self,
html: bool = False, # noqa: FBT001,FBT002
) -> str:
"""Return a pretty representation of the prompt.
Args:
html: Whether to return an html formatted string.
Returns:
A pretty representation of the prompt.
"""
raise NotImplementedError
| ImagePromptTemplate |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias18.py | {
"start": 512,
"end": 614
} | class ____(A_Alias_1[T2]): ...
# This should generate an error because the variance is incompatible.
| A_1 |
python | ray-project__ray | python/ray/serve/tests/test_record_routing_stats.py | {
"start": 516,
"end": 5406
} | class ____:
def __init__(self):
self.routing_stats: Dict[str, Any] = {}
self.should_hang: Optional[asyncio.Event] = None
self.should_fail: bool = False
context = _get_internal_replica_context()
self.replica_id: ReplicaID = context.replica_id
async def record_routing_stats(self):
if self.should_hang:
await self.should_hang.wait()
if self.should_fail:
raise Exception("intended to fail")
return self.routing_stats
def __call__(self, *args) -> ReplicaID:
return self.replica_id
def set_routing_stats(self, routing_stats: Dict[str, Any]) -> ReplicaID:
self.routing_stats = routing_stats
return self.replica_id
def set_should_fail(self):
self.should_fail = True
def set_should_hang(self):
self.should_hang = asyncio.Event()
def check_routing_stats_recorded(
handle: DeploymentHandle,
expected_stats: Dict[str, Any],
replica_id: Optional[ReplicaID] = None,
) -> bool:
running_replicas = handle._router._asyncio_router.request_router._replicas
if replica_id:
target_running_replica = running_replicas[replica_id]
else:
target_running_replica = next(iter(running_replicas.values()))
assert (
target_running_replica.routing_stats == expected_stats
), f"{target_running_replica.routing_stats=} != {expected_stats=}"
return True
@pytest.mark.parametrize("use_class", [True, False])
def test_no_user_defined_method(serve_instance, use_class):
"""Check the default behavior."""
if use_class:
@serve.deployment
class A:
def __call__(self, *args):
return ray.get_runtime_context().current_actor
else:
@serve.deployment
def A(*args):
return ray.get_runtime_context().current_actor
h = serve.run(A.bind())
_ = h.remote().result()
replicas = list(h._router._asyncio_router.request_router._replicas.values())
assert len(replicas) == 1
assert replicas[0].routing_stats == {}
@pytest.mark.asyncio
async def test_user_defined_method_fails(serve_instance):
"""Check the behavior when a user-defined method fails."""
expected_stats = {"foo": "bar"}
h = serve.run(Patient.bind())
await h.set_routing_stats.remote(expected_stats)
replica_id = await h.remote()
# Ensure the routing stats are recorded correctly before the failure
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats=expected_stats,
replica_id=replica_id,
)
await h.set_should_fail.remote()
await asyncio.gather(*[h.remote() for _ in range(100)])
# After the failure, the previous routing stats should still accessible
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats=expected_stats,
replica_id=replica_id,
)
@pytest.mark.asyncio
async def test_user_defined_method_hangs(serve_instance):
"""Check the behavior when a user-defined method hangs."""
expected_stats = {"foo": "bar"}
h = serve.run(Patient.bind())
await h.set_routing_stats.remote(expected_stats)
replica_id = await h.remote()
# Ensure the routing stats are recorded correctly before the failure
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats=expected_stats,
replica_id=replica_id,
)
await h.set_should_hang.remote()
await asyncio.gather(*[h.remote() for _ in range(100)])
# After the hang, the previous routing stats should still accessible
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats=expected_stats,
replica_id=replica_id,
)
@pytest.mark.asyncio
async def test_multiple_replicas(serve_instance):
"""Check the behavior with multiple replicas."""
h = serve.run(Patient.options(num_replicas=2).bind())
replica_ids = set(await asyncio.gather(*[h.remote() for _ in range(100)]))
assert len(replica_ids) == 2
# Ensure that the routing stats is set for one of the replicas.
expected_stats = {"foo": "bar"}
updated_stats_replica_id = await h.set_routing_stats.remote(expected_stats)
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats=expected_stats,
replica_id=updated_stats_replica_id,
)
# Ensure that the routing stats is not set for the other replica.
replica_ids.remove(updated_stats_replica_id)
unupdated_stats_replica_id = replica_ids.pop()
wait_for_condition(
check_routing_stats_recorded,
handle=h,
expected_stats={},
replica_id=unupdated_stats_replica_id,
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| Patient |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor6.py | {
"start": 294,
"end": 1002
} | class ____(Generic[_T]):
@overload
def __init__(self: "TextField[str]", *, null: Literal[False] = ...) -> None: ...
@overload
def __init__(
self: "TextField[Optional[str]]",
*,
null: Literal[True] = ...,
) -> None: ...
@overload
def __init__(self, *, null: bool = ...) -> None: ...
def __init__(self, *, null: bool = ...) -> None: ...
def __get__(self: "TextField[_T]", instance: Any, owner: Any) -> _T: ...
def foo(a: bool):
reveal_type(TextField(), expected_text="TextField[str]")
reveal_type(TextField(null=True), expected_text="TextField[str | None]")
reveal_type(TextField(null=a), expected_text="TextField[Unknown]")
| TextField |
python | getsentry__sentry | src/sentry/integrations/bitbucket_server/integration.py | {
"start": 9315,
"end": 13181
} | class ____(RepositoryIntegration):
"""
IntegrationInstallation implementation for Bitbucket Server
"""
codeowners_locations = [".bitbucket/CODEOWNERS"]
@property
def integration_name(self) -> str:
return IntegrationProviderSlug.BITBUCKET_SERVER.value
def get_client(self) -> BitbucketServerClient:
try:
return BitbucketServerClient(
integration=self.model,
identity=self.default_identity,
)
except Identity.DoesNotExist:
raise IntegrationError("Identity not found.")
# IntegrationInstallation methods
def error_message_from_json(self, data):
return data.get("error", {}).get("message", "unknown error")
# RepositoryIntegration methods
def get_repositories(
self, query: str | None = None, page_number_limit: int | None = None
) -> list[dict[str, Any]]:
if not query:
resp = self.get_client().get_repos()
return [
{
"identifier": repo["project"]["key"] + "/" + repo["slug"],
"project": repo["project"]["key"],
"repo": repo["slug"],
"name": repo["project"]["name"] + "/" + repo["name"],
}
for repo in resp.get("values", [])
]
resp = self.get_client().search_repositories(query)
return [
{
"identifier": repo["project"]["key"] + "/" + repo["slug"],
"project": repo["project"]["key"],
"repo": repo["slug"],
"name": repo["project"]["name"] + "/" + repo["name"],
}
for repo in resp.get("values", [])
]
def has_repo_access(self, repo: RpcRepository) -> bool:
"""
We can assume user always has repo access, since the Bitbucket API is limiting the results based on the REPO_ADMIN permission
"""
return True
def get_unmigratable_repositories(self):
repos = repository_service.get_repositories(
organization_id=self.organization_id,
providers=[
IntegrationProviderSlug.BITBUCKET_SERVER.value,
],
)
accessible_repos = [r["identifier"] for r in self.get_repositories()]
return list(filter(lambda repo: repo.name not in accessible_repos, repos))
def source_url_matches(self, url: str) -> bool:
return url.startswith(self.model.metadata["base_url"])
def format_source_url(self, repo: Repository, filepath: str, branch: str | None) -> str:
project = quote(repo.config["project"])
repo_name = quote(repo.config["repo"])
source_url = f"{self.model.metadata["base_url"]}/projects/{project}/repos/{repo_name}/browse/{filepath}"
if branch:
source_url += "?" + urlencode({"at": branch})
return source_url
def extract_branch_from_source_url(self, repo: Repository, url: str) -> str:
parsed_url = urlparse(url)
qs = parse_qs(parsed_url.query)
if "at" in qs and len(qs["at"]) == 1:
branch = qs["at"][0]
# branch name may be prefixed with refs/heads/, so we strip that
refs_prefix = "refs/heads/"
if branch.startswith(refs_prefix):
branch = branch[len(refs_prefix) :]
return branch
return ""
def extract_source_path_from_source_url(self, repo: Repository, url: str) -> str:
if repo.url is None:
return ""
parsed_repo_url = urlparse(repo.url)
parsed_url = urlparse(url)
return parsed_url.path.replace(parsed_repo_url.path + "/", "")
# Bitbucket Server only methods
@property
def username(self):
return self.model.name
| BitbucketServerIntegration |
python | tensorflow__tensorflow | tensorflow/python/framework/auto_control_deps.py | {
"start": 6331,
"end": 7161
} | class ____(enum.Enum):
READ_ONLY = "read-only"
READ_WRITE = "read-write"
def collective_manager_ids_from_op(op):
"""Returns CollectiveManager ID from the op if one exists, else None.
CollectiveManager adds collective and no_op operations tagged with an ID,
unique to the manager object. This function extracts that ID, or None, if the
node was not generated by a CollectiveManager.
Args:
op: `Operation` to get the collective manager ID from.
Returns:
List of CollectiveManager IDs used by the op.
"""
if op.type == "CollectiveReduce":
try:
return [op.get_attr("_collective_manager_id")]
except ValueError:
pass
elif op.type == "StatefulPartitionedCall":
try:
return op.get_attr(utils.COLLECTIVE_MANAGER_IDS)
except ValueError:
pass
return []
| ResourceType |
python | PyCQA__pyflakes | pyflakes/test/test_dict.py | {
"start": 138,
"end": 5271
} | class ____(TestCase):
def test_duplicate_keys(self):
self.flakes(
"{'yes': 1, 'yes': 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_bytes_vs_unicode_py3(self):
self.flakes("{b'a': 1, u'a': 2}")
def test_duplicate_values_bytes_vs_unicode_py3(self):
self.flakes(
"{1: b'a', 1: u'a'}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_multiple_duplicate_keys(self):
self.flakes(
"{'yes': 1, 'yes': 2, 'no': 2, 'no': 3}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_in_function(self):
self.flakes(
'''
def f(thing):
pass
f({'yes': 1, 'yes': 2})
''',
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_in_lambda(self):
self.flakes(
"lambda x: {(0,1): 1, (0,1): 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_tuples(self):
self.flakes(
"{(0,1): 1, (0,1): 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_tuples_int_and_float(self):
self.flakes(
"{(0,1): 1, (0,1.0): 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_ints(self):
self.flakes(
"{1: 1, 1: 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_bools(self):
self.flakes(
"{True: 1, True: 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_bools_false(self):
# Needed to ensure 2.x correctly coerces these from variables
self.flakes(
"{False: 1, False: 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_keys_none(self):
self.flakes(
"{None: 1, None: 2}",
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_variable_keys(self):
self.flakes(
'''
a = 1
{a: 1, a: 2}
''',
m.MultiValueRepeatedKeyVariable,
m.MultiValueRepeatedKeyVariable,
)
def test_duplicate_variable_values(self):
self.flakes(
'''
a = 1
b = 2
{1: a, 1: b}
''',
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_variable_values_same_value(self):
# Current behaviour is not to look up variable values. This is to
# confirm that.
self.flakes(
'''
a = 1
b = 1
{1: a, 1: b}
''',
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_duplicate_key_float_and_int(self):
"""
These do look like different values, but when it comes to their use as
keys, they compare as equal and so are actually duplicates.
The literal dict {1: 1, 1.0: 1} actually becomes {1.0: 1}.
"""
self.flakes(
'''
{1: 1, 1.0: 2}
''',
m.MultiValueRepeatedKeyLiteral,
m.MultiValueRepeatedKeyLiteral,
)
def test_no_duplicate_key_error_same_value(self):
self.flakes('''
{'yes': 1, 'yes': 1}
''')
def test_no_duplicate_key_errors(self):
self.flakes('''
{'yes': 1, 'no': 2}
''')
def test_no_duplicate_keys_tuples_same_first_element(self):
self.flakes("{(0,1): 1, (0,2): 1}")
def test_no_duplicate_key_errors_func_call(self):
self.flakes('''
def test(thing):
pass
test({True: 1, None: 2, False: 1})
''')
def test_no_duplicate_key_errors_bool_or_none(self):
self.flakes("{True: 1, None: 2, False: 1}")
def test_no_duplicate_key_errors_ints(self):
self.flakes('''
{1: 1, 2: 1}
''')
def test_no_duplicate_key_errors_vars(self):
self.flakes('''
test = 'yes'
rest = 'yes'
{test: 1, rest: 2}
''')
def test_no_duplicate_key_errors_tuples(self):
self.flakes('''
{(0,1): 1, (0,2): 1}
''')
def test_no_duplicate_key_errors_instance_attributes(self):
self.flakes('''
class Test():
pass
f = Test()
f.a = 1
{f.a: 1, f.a: 1}
''')
| Test |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 40221,
"end": 40361
} | class ____(MutableCompositesUnpickleTest):
@classmethod
def _type_fixture(cls):
return DCPoint
| MutableDCCompositesUnpickleTest |
python | django__django | tests/generic_relations/models.py | {
"start": 3705,
"end": 3823
} | class ____(models.Model):
bases = GenericRelation(ForProxyModelModel, for_concrete_model=False)
| ConcreteRelatedModel |
python | doocs__leetcode | solution/3300-3399/3378.Count Connected Components in LCM Graph/Solution.py | {
"start": 0,
"end": 636
} | class ____:
def __init__(self, n):
self.parent = {i: i for i in range(n)}
self.rank = {i: 0 for i in range(n)}
def make_set(self, v):
self.parent[v] = v
self.rank[v] = 1
def find(self, x):
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union_set(self, u, v):
u = self.find(u)
v = self.find(v)
if u != v:
if self.rank[u] < self.rank[v]:
u, v = v, u
self.parent[v] = u
if self.rank[u] == self.rank[v]:
self.rank[u] += 1
| DSU |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 75980,
"end": 80889
} | class ____(Wav2Vec2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)"
)
self.wav2vec2 = Wav2Vec2Model(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.wav2vec2.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| Wav2Vec2ForSequenceClassification |
python | numba__numba | numba/tests/test_array_attr.py | {
"start": 1539,
"end": 4531
} | class ____(MemoryLeakMixin, TestCase):
def setUp(self):
super(TestArrayAttr, self).setUp()
self.a = np.arange(20, dtype=np.int32).reshape(4, 5)
def check_unary(self, pyfunc, arr):
aryty = typeof(arr)
cfunc = self.get_cfunc(pyfunc, (aryty,))
expected = pyfunc(arr)
self.assertPreciseEqual(cfunc(arr), expected)
# Retry with forced any layout
cfunc = self.get_cfunc(pyfunc, (aryty.copy(layout='A'),))
self.assertPreciseEqual(cfunc(arr), expected)
def check_unary_with_arrays(self, pyfunc,):
self.check_unary(pyfunc, self.a)
self.check_unary(pyfunc, self.a.T)
self.check_unary(pyfunc, self.a[::2])
# 0-d array
arr = np.array([42]).reshape(())
self.check_unary(pyfunc, arr)
# array with an empty dimension
arr = np.zeros(0)
self.check_unary(pyfunc, arr)
# check with reshape
self.check_unary(pyfunc, arr.reshape((1, 0, 2)))
def get_cfunc(self, pyfunc, argspec):
return njit(argspec)(pyfunc)
def test_shape(self):
pyfunc = array_shape
cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32))
for i in range(self.a.ndim):
self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i))
def test_strides(self):
pyfunc = array_strides
cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32))
for i in range(self.a.ndim):
self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i))
def test_ndim(self):
self.check_unary_with_arrays(array_ndim)
def test_size(self):
self.check_unary_with_arrays(array_size)
def test_itemsize(self):
self.check_unary_with_arrays(array_itemsize)
def test_nbytes(self):
self.check_unary_with_arrays(array_nbytes)
def test_dtype(self):
pyfunc = array_dtype
self.check_unary(pyfunc, self.a)
dtype = np.dtype([('x', np.int8), ('y', np.int8)])
arr = np.zeros(4, dtype=dtype)
self.check_unary(pyfunc, arr)
def test_use_dtype(self):
# Test using the dtype attribute inside the Numba function itself
b = np.empty(1, dtype=np.int16)
pyfunc = use_dtype
cfunc = self.get_cfunc(pyfunc, (typeof(self.a), typeof(b)))
expected = pyfunc(self.a, b)
self.assertPreciseEqual(cfunc(self.a, b), expected)
def test_dtype_equal(self):
# Test checking if a dtype is equal to another dtype
pyfunc = dtype_eq_int64
self.check_unary(pyfunc, np.empty(1, dtype=np.int16))
self.check_unary(pyfunc, np.empty(1, dtype=np.int64))
def test_flags_contiguous(self):
self.check_unary_with_arrays(array_flags_contiguous)
def test_flags_c_contiguous(self):
self.check_unary_with_arrays(array_flags_c_contiguous)
def test_flags_f_contiguous(self):
self.check_unary_with_arrays(array_flags_f_contiguous)
| TestArrayAttr |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride6.py | {
"start": 1895,
"end": 2946
} | class ____(Generic[_T]):
@overload
def method1(self: "Parent2[int]", x: list[int]) -> list[int]: ...
@overload
def method1(self, x: str) -> dict[str, str]: ...
def method1(self, x: Any) -> Any: ...
@overload
def method2(self: "Parent2[int]", x: list[int]) -> list[int]: ...
@overload
def method2(self, x: str) -> dict[str, str]: ...
@overload
def method2(self, x: int) -> int: ...
def method2(self, x: Any) -> Any: ...
@overload
@classmethod
def method3(cls: "type[Parent2[int]]", x: list[int]) -> list[int]: ...
@overload
@classmethod
def method3(cls, x: str) -> dict[str, str]: ...
@classmethod
def method3(cls, x: Any) -> Any: ...
@overload
@classmethod
def method4(cls: "type[Parent2[int]]", x: list[int]) -> list[int]: ...
@overload
@classmethod
def method4(cls, x: str) -> dict[str, str]: ...
@overload
@classmethod
def method4(cls, x: int) -> int: ...
@classmethod
def method4(cls, x: Any) -> Any: ...
| Parent2 |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_server__embed.py | {
"start": 11262,
"end": 11939
} | class ____:
def test_bad_input(self) -> None:
with pytest.raises(ValueError):
bes._process_resources("foo")
def test_None(self) -> None:
assert bes._process_resources(None) == "&resources=none"
def test_default(self) -> None:
assert bes._process_resources("default") == ""
def Test__src_path(object):
def test_args(self) -> None:
assert bes._src_path("http://foo", "1234") =="http://foo/autoload.js?bokeh-autoload-element=1234"
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test__process_resources |
python | openai__gym | gym/envs/registration.py | {
"start": 2901,
"end": 26208
} | class ____:
"""A specification for creating environments with `gym.make`.
* id: The string used to create the environment with `gym.make`
* entry_point: The location of the environment to create from
* reward_threshold: The reward threshold for completing the environment.
* nondeterministic: If the observation of an environment cannot be repeated with the same initial state, random number generator state and actions.
* max_episode_steps: The max number of steps that the environment can take before truncation
* order_enforce: If to enforce the order of `reset` before `step` and `render` functions
* autoreset: If to automatically reset the environment on episode end
* disable_env_checker: If to disable the environment checker wrapper in `gym.make`, by default False (runs the environment checker)
* kwargs: Additional keyword arguments passed to the environments through `gym.make`
"""
id: str
entry_point: Union[Callable, str]
# Environment attributes
reward_threshold: Optional[float] = field(default=None)
nondeterministic: bool = field(default=False)
# Wrappers
max_episode_steps: Optional[int] = field(default=None)
order_enforce: bool = field(default=True)
autoreset: bool = field(default=False)
disable_env_checker: bool = field(default=False)
apply_api_compatibility: bool = field(default=False)
# Environment arguments
kwargs: dict = field(default_factory=dict)
# post-init attributes
namespace: Optional[str] = field(init=False)
name: str = field(init=False)
version: Optional[int] = field(init=False)
def __post_init__(self):
# Initialize namespace, name, version
self.namespace, self.name, self.version = parse_env_id(self.id)
def make(self, **kwargs) -> Env:
# For compatibility purposes
return make(self, **kwargs)
def _check_namespace_exists(ns: Optional[str]):
"""Check if a namespace exists. If it doesn't, print a helpful error message."""
if ns is None:
return
namespaces = {
spec_.namespace for spec_ in registry.values() if spec_.namespace is not None
}
if ns in namespaces:
return
suggestion = (
difflib.get_close_matches(ns, namespaces, n=1) if len(namespaces) > 0 else None
)
suggestion_msg = (
f"Did you mean: `{suggestion[0]}`?"
if suggestion
else f"Have you installed the proper package for {ns}?"
)
raise error.NamespaceNotFound(f"Namespace {ns} not found. {suggestion_msg}")
def _check_name_exists(ns: Optional[str], name: str):
"""Check if an env exists in a namespace. If it doesn't, print a helpful error message."""
_check_namespace_exists(ns)
names = {spec_.name for spec_ in registry.values() if spec_.namespace == ns}
if name in names:
return
suggestion = difflib.get_close_matches(name, names, n=1)
namespace_msg = f" in namespace {ns}" if ns else ""
suggestion_msg = f"Did you mean: `{suggestion[0]}`?" if suggestion else ""
raise error.NameNotFound(
f"Environment {name} doesn't exist{namespace_msg}. {suggestion_msg}"
)
def _check_version_exists(ns: Optional[str], name: str, version: Optional[int]):
"""Check if an env version exists in a namespace. If it doesn't, print a helpful error message.
This is a complete test whether an environment identifier is valid, and will provide the best available hints.
Args:
ns: The environment namespace
name: The environment space
version: The environment version
Raises:
DeprecatedEnv: The environment doesn't exist but a default version does
VersionNotFound: The ``version`` used doesn't exist
DeprecatedEnv: Environment version is deprecated
"""
if get_env_id(ns, name, version) in registry:
return
_check_name_exists(ns, name)
if version is None:
return
message = f"Environment version `v{version}` for environment `{get_env_id(ns, name, None)}` doesn't exist."
env_specs = [
spec_
for spec_ in registry.values()
if spec_.namespace == ns and spec_.name == name
]
env_specs = sorted(env_specs, key=lambda spec_: int(spec_.version or -1))
default_spec = [spec_ for spec_ in env_specs if spec_.version is None]
if default_spec:
message += f" It provides the default version {default_spec[0].id}`."
if len(env_specs) == 1:
raise error.DeprecatedEnv(message)
# Process possible versioned environments
versioned_specs = [spec_ for spec_ in env_specs if spec_.version is not None]
latest_spec = max(versioned_specs, key=lambda spec: spec.version, default=None) # type: ignore
if latest_spec is not None and version > latest_spec.version:
version_list_msg = ", ".join(f"`v{spec_.version}`" for spec_ in env_specs)
message += f" It provides versioned environments: [ {version_list_msg} ]."
raise error.VersionNotFound(message)
if latest_spec is not None and version < latest_spec.version:
raise error.DeprecatedEnv(
f"Environment version v{version} for `{get_env_id(ns, name, None)}` is deprecated. "
f"Please use `{latest_spec.id}` instead."
)
def find_highest_version(ns: Optional[str], name: str) -> Optional[int]:
version: List[int] = [
spec_.version
for spec_ in registry.values()
if spec_.namespace == ns and spec_.name == name and spec_.version is not None
]
return max(version, default=None)
def load_env_plugins(entry_point: str = "gym.envs") -> None:
# Load third-party environments
for plugin in metadata.entry_points(group=entry_point):
# Python 3.8 doesn't support plugin.module, plugin.attr
# So we'll have to try and parse this ourselves
module, attr = None, None
try:
module, attr = plugin.module, plugin.attr # type: ignore ## error: Cannot access member "attr" for type "EntryPoint"
except AttributeError:
if ":" in plugin.value:
module, attr = plugin.value.split(":", maxsplit=1)
else:
module, attr = plugin.value, None
except Exception as e:
warnings.warn(
f"While trying to load plugin `{plugin}` from {entry_point}, an exception occurred: {e}"
)
module, attr = None, None
finally:
if attr is None:
raise error.Error(
f"Gym environment plugin `{module}` must specify a function to execute, not a root module"
)
context = namespace(plugin.name)
if plugin.name.startswith("__") and plugin.name.endswith("__"):
# `__internal__` is an artifact of the plugin system when
# the root namespace had an allow-list. The allow-list is now
# removed and plugins can register environments in the root
# namespace with the `__root__` magic key.
if plugin.name == "__root__" or plugin.name == "__internal__":
context = contextlib.nullcontext()
else:
logger.warn(
f"The environment namespace magic key `{plugin.name}` is unsupported. "
"To register an environment at the root namespace you should specify the `__root__` namespace."
)
with context:
fn = plugin.load()
try:
fn()
except Exception as e:
logger.warn(str(e))
# fmt: off
@overload
def make(id: str, **kwargs) -> Env: ...
@overload
def make(id: EnvSpec, **kwargs) -> Env: ...
# Classic control
# ----------------------------------------
@overload
def make(id: Literal["CartPole-v0", "CartPole-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["MountainCar-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["MountainCarContinuous-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ...
@overload
def make(id: Literal["Pendulum-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ...
@overload
def make(id: Literal["Acrobot-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
# Box2d
# ----------------------------------------
@overload
def make(id: Literal["LunarLander-v2", "LunarLanderContinuous-v2"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["BipedalWalker-v3", "BipedalWalkerHardcore-v3"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ...
@overload
def make(id: Literal["CarRacing-v2"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, Sequence[SupportsFloat]]]: ...
# Toy Text
# ----------------------------------------
@overload
def make(id: Literal["Blackjack-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["FrozenLake-v1", "FrozenLake8x8-v1"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["CliffWalking-v0"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
@overload
def make(id: Literal["Taxi-v3"], **kwargs) -> Env[np.ndarray, Union[np.ndarray, int]]: ...
# Mujoco
# ----------------------------------------
@overload
def make(id: Literal[
"Reacher-v2", "Reacher-v4",
"Pusher-v2", "Pusher-v4",
"InvertedPendulum-v2", "InvertedPendulum-v4",
"InvertedDoublePendulum-v2", "InvertedDoublePendulum-v4",
"HalfCheetah-v2", "HalfCheetah-v3", "HalfCheetah-v4",
"Hopper-v2", "Hopper-v3", "Hopper-v4",
"Swimmer-v2", "Swimmer-v3", "Swimmer-v4",
"Walker2d-v2", "Walker2d-v3", "Walker2d-v4",
"Ant-v2", "Ant-v3", "Ant-v4",
"HumanoidStandup-v2", "HumanoidStandup-v4",
"Humanoid-v2", "Humanoid-v3", "Humanoid-v4",
], **kwargs) -> Env[np.ndarray, np.ndarray]: ...
# fmt: on
# Global registry of environments. Meant to be accessed through `register` and `make`
registry: Dict[str, EnvSpec] = {}
current_namespace: Optional[str] = None
def _check_spec_register(spec: EnvSpec):
"""Checks whether the spec is valid to be registered. Helper function for `register`."""
global registry
latest_versioned_spec = max(
(
spec_
for spec_ in registry.values()
if spec_.namespace == spec.namespace
and spec_.name == spec.name
and spec_.version is not None
),
key=lambda spec_: int(spec_.version), # type: ignore
default=None,
)
unversioned_spec = next(
(
spec_
for spec_ in registry.values()
if spec_.namespace == spec.namespace
and spec_.name == spec.name
and spec_.version is None
),
None,
)
if unversioned_spec is not None and spec.version is not None:
raise error.RegistrationError(
"Can't register the versioned environment "
f"`{spec.id}` when the unversioned environment "
f"`{unversioned_spec.id}` of the same name already exists."
)
elif latest_versioned_spec is not None and spec.version is None:
raise error.RegistrationError(
"Can't register the unversioned environment "
f"`{spec.id}` when the versioned environment "
f"`{latest_versioned_spec.id}` of the same name "
f"already exists. Note: the default behavior is "
f"that `gym.make` with the unversioned environment "
f"will return the latest versioned environment"
)
# Public API
@contextlib.contextmanager
def namespace(ns: str):
global current_namespace
old_namespace = current_namespace
current_namespace = ns
yield
current_namespace = old_namespace
def register(
id: str,
entry_point: Union[Callable, str],
reward_threshold: Optional[float] = None,
nondeterministic: bool = False,
max_episode_steps: Optional[int] = None,
order_enforce: bool = True,
autoreset: bool = False,
disable_env_checker: bool = False,
apply_api_compatibility: bool = False,
**kwargs,
):
"""Register an environment with gym.
The `id` parameter corresponds to the name of the environment, with the syntax as follows:
`(namespace)/(env_name)-v(version)` where `namespace` is optional.
It takes arbitrary keyword arguments, which are passed to the `EnvSpec` constructor.
Args:
id: The environment id
entry_point: The entry point for creating the environment
reward_threshold: The reward threshold considered to have learnt an environment
nondeterministic: If the environment is nondeterministic (even with knowledge of the initial seed and all actions)
max_episode_steps: The maximum number of episodes steps before truncation. Used by the Time Limit wrapper.
order_enforce: If to enable the order enforcer wrapper to ensure users run functions in the correct order
autoreset: If to add the autoreset wrapper such that reset does not need to be called.
disable_env_checker: If to disable the environment checker for the environment. Recommended to False.
apply_api_compatibility: If to apply the `StepAPICompatibility` wrapper.
**kwargs: arbitrary keyword arguments which are passed to the environment constructor
"""
global registry, current_namespace
ns, name, version = parse_env_id(id)
if current_namespace is not None:
if (
kwargs.get("namespace") is not None
and kwargs.get("namespace") != current_namespace
):
logger.warn(
f"Custom namespace `{kwargs.get('namespace')}` is being overridden by namespace `{current_namespace}`. "
f"If you are developing a plugin you shouldn't specify a namespace in `register` calls. "
"The namespace is specified through the entry point package metadata."
)
ns_id = current_namespace
else:
ns_id = ns
full_id = get_env_id(ns_id, name, version)
new_spec = EnvSpec(
id=full_id,
entry_point=entry_point,
reward_threshold=reward_threshold,
nondeterministic=nondeterministic,
max_episode_steps=max_episode_steps,
order_enforce=order_enforce,
autoreset=autoreset,
disable_env_checker=disable_env_checker,
apply_api_compatibility=apply_api_compatibility,
**kwargs,
)
_check_spec_register(new_spec)
if new_spec.id in registry:
logger.warn(f"Overriding environment {new_spec.id} already in registry.")
registry[new_spec.id] = new_spec
def make(
    id: Union[str, EnvSpec],
    max_episode_steps: Optional[int] = None,
    autoreset: bool = False,
    apply_api_compatibility: Optional[bool] = None,
    disable_env_checker: Optional[bool] = None,
    **kwargs,
) -> Env:
    """Create an environment according to the given ID.

    To find all available environments use `gym.envs.registry.keys()` for all valid ids.

    Args:
        id: Name of the environment. Optionally, a module to import can be included, eg. 'module:Env-v0'
        max_episode_steps: Maximum length of an episode (TimeLimit wrapper).
        autoreset: Whether to automatically reset the environment after each episode (AutoResetWrapper).
        apply_api_compatibility: Whether to wrap the environment with the `StepAPICompatibility` wrapper that
            converts the environment step from a done bool to return termination and truncation bools.
            By default, the argument is None to which the environment specification `apply_api_compatibility` is used
            which defaults to False. Otherwise, the value of `apply_api_compatibility` is used.
            If `True`, the wrapper is applied otherwise, the wrapper is not applied.
        disable_env_checker: If to run the env checker, None will default to the environment specification `disable_env_checker`
            (which is by default False, running the environment checker),
            otherwise will run according to this parameter (`True` = not run, `False` = run)
        kwargs: Additional arguments to pass to the environment constructor.

    Returns:
        An instance of the environment (possibly wrapped, see the wrapper chain at the end).

    Raises:
        Error: If the ``id`` doesn't exist then an error is raised
    """
    # --- Resolve the EnvSpec: either passed in directly, or looked up in the
    # global registry (optionally importing a plugin module first).
    if isinstance(id, EnvSpec):
        spec_ = id
    else:
        # "module:EnvId" ids import `module` first so its `register()` calls run.
        # NOTE(review): assumes at most one ":" in the id — a second colon would
        # make the tuple unpacking below raise ValueError; confirm callers.
        module, id = (None, id) if ":" not in id else id.split(":")
        if module is not None:
            try:
                importlib.import_module(module)
            except ModuleNotFoundError as e:
                raise ModuleNotFoundError(
                    f"{e}. Environment registration via importing a module failed. "
                    f"Check whether '{module}' contains env registration and can be imported."
                )
        spec_ = registry.get(id)
        ns, name, version = parse_env_id(id)
        latest_version = find_highest_version(ns, name)
        # Warn when a newer registered version of the requested env exists.
        if (
            version is not None
            and latest_version is not None
            and latest_version > version
        ):
            logger.warn(
                f"The environment {id} is out of date. You should consider "
                f"upgrading to version `v{latest_version}`."
            )
        # An unversioned id (e.g. "CartPole") resolves to the highest version.
        if version is None and latest_version is not None:
            version = latest_version
            new_env_id = get_env_id(ns, name, version)
            spec_ = registry.get(new_env_id)
            logger.warn(
                f"Using the latest versioned environment `{new_env_id}` "
                f"instead of the unversioned environment `{id}`."
            )
        if spec_ is None:
            # Emit a precise diagnostic (unknown name vs. missing version)
            # before failing with the generic error.
            _check_version_exists(ns, name, version)
            raise error.Error(f"No registered env with id: {id}")
    # Call-site kwargs override the defaults stored in the spec.
    _kwargs = spec_.kwargs.copy()
    _kwargs.update(kwargs)
    # --- Resolve the entry point into a callable environment constructor.
    if spec_.entry_point is None:
        raise error.Error(f"{spec_.id} registered but entry_point is not specified")
    elif callable(spec_.entry_point):
        env_creator = spec_.entry_point
    else:
        # Assume it's a string
        env_creator = load(spec_.entry_point)
    mode = _kwargs.get("render_mode")
    apply_human_rendering = False
    apply_render_collection = False
    # If we have access to metadata we check that "render_mode" is valid and see if the HumanRendering wrapper needs to be applied
    if mode is not None and hasattr(env_creator, "metadata"):
        assert isinstance(
            env_creator.metadata, dict
        ), f"Expect the environment creator ({env_creator}) metadata to be dict, actual type: {type(env_creator.metadata)}"
        if "render_modes" in env_creator.metadata:
            render_modes = env_creator.metadata["render_modes"]
            if not isinstance(render_modes, Sequence):
                logger.warn(
                    f"Expects the environment metadata render_modes to be a Sequence (tuple or list), actual type: {type(render_modes)}"
                )
            # Apply the `HumanRendering` wrapper, if the mode=="human" but "human" not in render_modes
            if (
                mode == "human"
                and "human" not in render_modes
                and ("rgb_array" in render_modes or "rgb_array_list" in render_modes)
            ):
                logger.warn(
                    "You are trying to use 'human' rendering for an environment that doesn't natively support it. "
                    "The HumanRendering wrapper is being applied to your environment."
                )
                apply_human_rendering = True
                # Substitute an rgb mode the env does support; HumanRendering
                # will display those frames for the user.
                if "rgb_array" in render_modes:
                    _kwargs["render_mode"] = "rgb_array"
                else:
                    _kwargs["render_mode"] = "rgb_array_list"
            elif (
                mode not in render_modes
                and mode.endswith("_list")
                and mode[: -len("_list")] in render_modes
            ):
                # "<mode>_list" is emulated by RenderCollection on top of "<mode>".
                _kwargs["render_mode"] = mode[: -len("_list")]
                apply_render_collection = True
            elif mode not in render_modes:
                logger.warn(
                    f"The environment is being initialised with mode ({mode}) that is not in the possible render_modes ({render_modes})."
                )
        else:
            logger.warn(
                f"The environment creator metadata doesn't include `render_modes`, contains: {list(env_creator.metadata.keys())}"
            )
    # Explicit `True` wins; `None` defers to the spec's setting.
    if apply_api_compatibility is True or (
        apply_api_compatibility is None and spec_.apply_api_compatibility is True
    ):
        # If we use the compatibility layer, we treat the render mode explicitly and don't pass it to the env creator
        render_mode = _kwargs.pop("render_mode", None)
    else:
        render_mode = None
    try:
        env = env_creator(**_kwargs)
    except TypeError as e:
        # Old-render-API envs don't accept a `render_mode` kwarg; give a
        # targeted error instead of the raw TypeError when we injected one.
        if (
            str(e).find("got an unexpected keyword argument 'render_mode'") >= 0
            and apply_human_rendering
        ):
            raise error.Error(
                f"You passed render_mode='human' although {id} doesn't implement human-rendering natively. "
                "Gym tried to apply the HumanRendering wrapper but it looks like your environment is using the old "
                "rendering API, which is not supported by the HumanRendering wrapper."
            )
        else:
            raise e
    # Copies the environment creation specification and kwargs to add to the environment specification details
    spec_ = copy.deepcopy(spec_)
    spec_.kwargs = _kwargs
    env.unwrapped.spec = spec_
    # --- Wrapper chain; the order below is significant (innermost first):
    # compatibility -> checker -> order enforcing -> time limit -> autoreset
    # -> human rendering / render collection.
    # Add step API wrapper
    if apply_api_compatibility is True or (
        apply_api_compatibility is None and spec_.apply_api_compatibility is True
    ):
        env = EnvCompatibility(env, render_mode)
    # Run the environment checker as the lowest level wrapper
    if disable_env_checker is False or (
        disable_env_checker is None and spec_.disable_env_checker is False
    ):
        env = PassiveEnvChecker(env)
    # Add the order enforcing wrapper
    if spec_.order_enforce:
        env = OrderEnforcing(env)
    # Add the time limit wrapper
    if max_episode_steps is not None:
        env = TimeLimit(env, max_episode_steps)
    elif spec_.max_episode_steps is not None:
        env = TimeLimit(env, spec_.max_episode_steps)
    # Add the autoreset wrapper
    if autoreset:
        env = AutoResetWrapper(env)
    # Add human rendering wrapper
    if apply_human_rendering:
        env = HumanRendering(env)
    elif apply_render_collection:
        env = RenderCollection(env)
    return env
def spec(env_id: str) -> EnvSpec:
    """Retrieve the spec for the given environment from the global registry."""
    env_spec = registry.get(env_id)
    # Fast path: a registered id returns its spec directly.
    if env_spec is not None:
        assert isinstance(env_spec, EnvSpec)
        return env_spec
    # Unknown id: raise a precise version-related error when possible,
    # otherwise fall through to the generic lookup failure.
    ns, name, version = parse_env_id(env_id)
    _check_version_exists(ns, name, version)
    raise error.Error(f"No registered env with id: {env_id}")
| EnvSpec |
python | walkccc__LeetCode | solutions/1131. Maximum of Absolute Value Expression/1131.py | {
"start": 0,
"end": 363
} | class ____:
def maxAbsValExpr(self, arr1: list[int], arr2: list[int]) -> int:
n = len(arr1)
a = [arr1[i] + arr2[i] + i for i in range(n)]
b = [arr1[i] + arr2[i] - i for i in range(n)]
c = [arr1[i] - arr2[i] + i for i in range(n)]
d = [arr1[i] - arr2[i] - i for i in range(n)]
return max(map(lambda x: max(x) - min(x), (a, b, c, d)))
| Solution |
python | django__django | django/contrib/auth/hashers.py | {
"start": 16778,
"end": 19378
} | class ____(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
password = force_bytes(password)
salt = force_bytes(salt)
# Hash the password prior to using bcrypt to prevent password
# truncation as described in #20138.
if self.digest is not None:
# Use binascii.hexlify() because a hex encoded bytestring is str.
password = binascii.hexlify(self.digest(password).digest())
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, data.decode("ascii"))
def decode(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split("$", 4)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"algostr": algostr,
"checksum": data[22:],
"salt": data[:22],
"work_factor": int(work_factor),
}
def verify(self, password, encoded):
algorithm, data = encoded.split("$", 1)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, data.encode("ascii"))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("work factor"): decoded["work_factor"],
_("salt"): mask_hash(decoded["salt"]),
_("checksum"): mask_hash(decoded["checksum"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return decoded["work_factor"] != self.rounds
def harden_runtime(self, password, encoded):
_, data = encoded.split("$", 1)
salt = data[:29] # Length of the salt in bcrypt.
rounds = data.split("$")[2]
# work factor is logarithmic, adding one doubles the load.
diff = 2 ** (self.rounds - int(rounds)) - 1
while diff > 0:
self.encode(password, salt.encode("ascii"))
diff -= 1
| BCryptSHA256PasswordHasher |
python | instagram__MonkeyType | tests/test_util.py | {
"start": 2667,
"end": 2738
} | class ____(Dummy):
def an_instance_method(self):
pass
| Derived |
python | tox-dev__tox | src/tox/execute/stream.py | {
"start": 472,
"end": 3691
} | class ____:
"""
Make sure data collected is synced in-memory and to the target stream on every newline and time period.
Used to propagate executed commands output to the standard output/error streams visible to the user.
"""
REFRESH_RATE = 0.1
def __init__(self, name: str, target: IO[bytes] | None, color: str | None = None) -> None:
self._content = bytearray()
self._target: IO[bytes] | None = target
self._target_enabled: bool = target is not None
self._keep_printing: Event = Event()
self._content_lock: Lock = Lock()
self._lock: Lock = Lock()
self._at: int = 0
self._color: str | None = color
self.name = name
def __repr__(self) -> str:
return f"{self.__class__.__name__}(name={self.name!r}, target={self._target!r}, color={self._color!r})"
def __enter__(self) -> Self:
if self._target_enabled:
self._start()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self._target_enabled:
self._cancel()
self._write(len(self._content))
def handler(self, content: bytes) -> None:
"""A callback called whenever content is written."""
with self._content_lock:
self._content.extend(content)
if self._target_enabled is False:
return
at = content.rfind(b"\n")
if at != -1: # pragma: no branch
at = len(self._content) - len(content) + at + 1
self._cancel()
try:
if at != -1:
self._write(at)
finally:
self._start()
def _start(self) -> None:
self.timer = Timer(self.REFRESH_RATE, self._trigger_timer)
self.timer.name = f"{self.name}-sync-timer"
self.timer.start()
def _cancel(self) -> None:
self.timer.cancel()
def _trigger_timer(self) -> None:
with self._content_lock:
at = len(self._content)
self._write(at)
def _write(self, at: int) -> None:
assert self._target is not None # because _do_print is guarding the call of this method # noqa: S101
with self._lock:
if at > self._at: # pragma: no branch
try:
with self.colored():
self._target.write(self._content[self._at : at])
self._target.flush()
finally:
self._at = at
@contextmanager
def colored(self) -> Iterator[None]:
if self._color is None or self._target is None:
yield
else:
self._target.write(self._color.encode("utf-8"))
try:
yield
finally:
self._target.write(Fore.RESET.encode("utf-8"))
@property
def text(self) -> str:
with self._content_lock:
return self._content.decode("utf-8", errors="surrogateescape")
@property
def content(self) -> bytearray:
with self._content_lock:
return self._content
| SyncWrite |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 4918,
"end": 5064
} | class ____(Enum):
"Alignment for `VSplit`."
LEFT = "LEFT"
CENTER = "CENTER"
RIGHT = "RIGHT"
JUSTIFY = "JUSTIFY"
| HorizontalAlign |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1124648,
"end": 1124867
} | class ____(VegaLiteSchema):
"""ScaleInterpolateEnum schema wrapper."""
_schema = {"$ref": "#/definitions/ScaleInterpolateEnum"}
def __init__(self, *args):
super().__init__(*args)
| ScaleInterpolateEnum |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-mongodb-atlas-bm25-retriever/llama_index/retrievers/mongodb_atlas_bm25_retriever/base.py | {
"start": 394,
"end": 4055
} | class ____(BaseRetriever):
def __init__(
self,
mongodb_client: Optional[Any] = None,
db_name: str = "default_db",
collection_name: str = "default_collection",
index_name: str = "default",
text_key: str = "text",
metadata_key: str = "metadata",
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
) -> None:
"""
Initialize the vector store.
Args:
mongodb_client: A MongoDB client.
db_name: A MongoDB database name.
collection_name: A MongoDB collection name.
index_name: A MongoDB Atlas Vector Search index name.
text_key: A MongoDB field that will contain the text for each document.
metadata_key: A MongoDB field that will contain
"""
import_err_msg = "`pymongo` package not found, please run `pip install pymongo`"
try:
from importlib.metadata import version
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
except ImportError:
raise ImportError(import_err_msg)
if mongodb_client is not None:
self._mongodb_client = cast(MongoClient, mongodb_client)
else:
if "MONGO_URI" not in os.environ:
raise ValueError(
"Must specify MONGO_URI via env variable "
"if not directly passing in client."
)
self._mongodb_client = MongoClient(
os.environ["MONGO_URI"],
driver=DriverInfo(name="llama-index", version=version("llama-index")),
)
self._db = self._mongodb_client[db_name]
self._collection = self._db[collection_name]
self._index_name = index_name
self._text_key = text_key
self._metadata_key = metadata_key
self._similarity_top_k = similarity_top_k
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve nodes given query."""
query = query_bundle.query_str
pipeline = [
{
"$search": {
"index": self._index_name,
"text": {"query": query, "path": self._text_key},
}
},
{"$addFields": {"score": {"$meta": "searchScore"}}},
{"$sort": {"score": -1}},
{"$limit": self._similarity_top_k},
]
results = list(self._collection.aggregate(pipeline))
retrieve_nodes = []
for result in results[: self._similarity_top_k]:
doc = self._collection.find_one({"_id": result["_id"]})
node = doc[self._text_key]
node_content = json.loads(
doc.get("metadata", {}).get("_node_content", "{}")
)
metadata_dict = doc.pop(self._metadata_key)
node = None
try:
node = metadata_dict_to_node(metadata_dict)
node.set_content(doc["text"])
except Exception:
node = TextNode(
text=doc["text"],
id_=doc["id"],
metadata=doc.get("metadata", {}),
start_char_idx=node_content.get("start_char_idx", None),
end_char_idx=node_content.get("end_char_idx", None),
relationships=node_content.get("relationships", None),
)
node_with_score = NodeWithScore(node=node, score=result["score"])
retrieve_nodes.append(node_with_score)
return retrieve_nodes
| MongoDBAtlasBM25Retriever |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/integrations.py | {
"start": 13135,
"end": 21450
} | class ____(WebhookMixin, APIView):
"""
Webhook consumer for GitHub.
Accepts webhook events from GitHub, 'push' and 'pull_request' events trigger builds.
Expects the webhook event type will be included in HTTP header ``X-GitHub-Event``,
and we will have a JSON payload.
Expects the following JSON::
For push, create, delete Events:
{
"ref": "branch-name",
...
}
For pull_request Events:
{
"action": "opened",
"number": 2,
"pull_request": {
"head": {
"sha": "ec26de721c3235aad62de7213c562f8c821"
}
}
}
See full payload here:
- https://developer.github.com/v3/activity/events/types/#pushevent
- https://developer.github.com/v3/activity/events/types/#createevent
- https://developer.github.com/v3/activity/events/types/#deleteevent
- https://developer.github.com/v3/activity/events/types/#pullrequestevent
"""
integration_type = Integration.GITHUB_WEBHOOK
invalid_payload_msg = "Payload not valid, invalid or missing signature"
def get_data(self):
if self.request.content_type == "application/x-www-form-urlencoded":
try:
return json.loads(self.request.data["payload"])
except (ValueError, KeyError):
pass
return super().get_data()
def get_external_version_data(self):
"""Get Commit Sha and pull request number from payload."""
try:
data = ExternalVersionData(
id=str(self.data["number"]),
commit=self.data["pull_request"]["head"]["sha"],
source_branch=self.data["pull_request"]["head"]["ref"],
base_branch=self.data["pull_request"]["base"]["ref"],
)
return data
except KeyError as e:
key = e.args[0]
raise ParseError(f"Invalid payload. {key} is required.") from e
def is_payload_valid(self):
"""
GitHub use a HMAC hexdigest hash to sign the payload.
It is sent in the request's header.
See https://developer.github.com/webhooks/securing/.
"""
signature = self.request.headers.get(GITHUB_SIGNATURE_HEADER)
if not signature:
return False
secret = self.get_integration().secret
msg = self.request.body.decode()
digest = WebhookMixin.get_digest(secret, msg)
result = hmac.compare_digest(
b"sha256=" + digest.encode(),
signature.encode(),
)
return result
def handle_webhook(self):
"""
Handle GitHub webhook events.
It checks for all the events we support currently:
- PUSH: Triggered on a push to a repository branch. Branch pushes and repository tag pushes
also trigger webhook push events.
.. note::
``created`` and ``deleted`` indicate if the push was a branch/tag created or deleted.
This is required for old webhook created at Read the Docs that do not register the
``create`` and ``delete`` events.
Newer webhooks created on Read the Docs, will trigger a PUSH+created=True **and** a
CREATE event. We need to handle this in a specific way to not trigger the sync twice.
- CREATE: Represents a created branch or tag.
- DELETE: Represents a deleted branch or tag.
- PULL_REQUEST: Triggered when a pull request is assigned, unassigned, labeled, unlabeled,
opened, edited, closed, reopened, synchronize, ready_for_review, locked, unlocked or when
a pull request review is requested or removed (``action`` will contain this data)
See https://developer.github.com/v3/activity/events/types/
"""
if self.project.is_github_app_project:
Notification.objects.add(
message_id=MESSAGE_PROJECT_DEPRECATED_WEBHOOK,
attached_to=self.project,
dismissable=True,
)
return Response(
{
"detail": " ".join(
dedent(
"""
This project is connected to our GitHub App and doesn't require a separate webhook, ignoring webhook event.
Remove the deprecated webhook from your repository to avoid duplicate events,
see https://docs.readthedocs.com/platform/stable/reference/git-integration.html#manually-migrating-a-project.
"""
)
.strip()
.splitlines()
)
},
status=status.HTTP_400_BAD_REQUEST,
)
# Get event and trigger other webhook events
action = self.data.get("action", None)
created = self.data.get("created", False)
deleted = self.data.get("deleted", False)
event = self.request.headers.get(GITHUB_EVENT_HEADER, GITHUB_PUSH)
structlog.contextvars.bind_contextvars(webhook_event=event)
webhook_github.send(
Project,
project=self.project,
data=self.data,
event=event,
)
# Always update `latest` branch to point to the default branch in the repository
# even if the event is not gonna be handled. This helps us to keep our db in sync.
default_branch = self.data.get("repository", {}).get("default_branch", None)
if default_branch:
self.update_default_branch(default_branch)
if event == GITHUB_PING:
return {"detail": "Webhook configured correctly"}
# Sync versions when a branch/tag was created/deleted
if event in (GITHUB_CREATE, GITHUB_DELETE):
log.debug("Triggered sync_versions.")
return self.sync_versions_response(self.project)
integration = self.get_integration()
# Handle pull request events.
if self.project.external_builds_enabled and event == GITHUB_PULL_REQUEST:
if action in [
GITHUB_PULL_REQUEST_OPENED,
GITHUB_PULL_REQUEST_REOPENED,
GITHUB_PULL_REQUEST_SYNC,
]:
# Trigger a build when PR is opened/reopened/sync
return self.get_external_version_response(self.project)
if action == GITHUB_PULL_REQUEST_CLOSED:
# Delete external version when PR is closed
return self.get_closed_external_version_response(self.project)
# Sync versions when push event is created/deleted action
if all(
[
event == GITHUB_PUSH,
(created or deleted),
]
):
events = (
integration.provider_data.get("events", []) if integration.provider_data else []
) # noqa
if any(
[
GITHUB_CREATE in events,
GITHUB_DELETE in events,
]
):
# GitHub will send PUSH **and** CREATE/DELETE events on a creation/deletion in newer
# webhooks. If we receive a PUSH event we need to check if the webhook doesn't
# already have the CREATE/DELETE events. So we don't trigger the sync twice.
return self.sync_versions_response(self.project, sync=False)
log.debug(
"Triggered sync_versions.",
integration_events=events,
)
return self.sync_versions_response(self.project)
# Trigger a build for all branches in the push
if event == GITHUB_PUSH:
try:
version_name, version_type = parse_version_from_ref(self.data["ref"])
return self.get_response_push(
self.project, [VersionInfo(name=version_name, type=version_type)]
)
except KeyError as exc:
raise ParseError('Parameter "ref" is required') from exc
return None
| GitHubWebhookView |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_environments.py | {
"start": 225,
"end": 2849
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-environments"
def setUp(self) -> None:
self.login_as(user=self.user)
@cached_property
def project(self) -> Project:
return self.create_project()
def test_simple(self) -> None:
Environment.objects.create(organization_id=self.project.organization_id, name="not project")
prod = self.create_environment(name="production", project=self.project)
staging = self.create_environment(name="staging", project=self.project)
response = self.get_success_response(self.project.organization.slug)
assert response.data == serialize([prod, staging])
def test_visibility(self) -> None:
visible = self.create_environment(name="visible", project=self.project, is_hidden=False)
hidden = self.create_environment(name="not visible", project=self.project, is_hidden=True)
not_set = self.create_environment(name="null visible", project=self.project)
response = self.get_success_response(self.project.organization.slug, visibility="visible")
assert response.data == serialize([not_set, visible])
response = self.get_success_response(self.project.organization.slug, visibility="hidden")
assert response.data == serialize([hidden])
response = self.get_success_response(self.project.organization.slug, visibility="all")
assert response.data == serialize([hidden, not_set, visible])
def test_project_filter(self) -> None:
other_project = self.create_project()
project_env = self.create_environment(name="project", project=self.project)
other_project_env = self.create_environment(name="other", project=other_project)
response = self.get_success_response(
self.project.organization.slug, project=[self.project.id]
)
assert response.data == serialize([project_env])
response = self.get_success_response(
self.project.organization.slug, project=[other_project.id]
)
assert response.data == serialize([other_project_env])
response = self.get_success_response(
self.project.organization.slug, project=[self.project.id, other_project.id]
)
assert response.data == serialize([other_project_env, project_env])
def test_invalid_visibility(self) -> None:
response = self.get_response(self.project.organization.slug, visibility="invalid-vis")
assert response.status_code == 400
assert response.data["detail"].startswith("Invalid value for 'visibility'")
| OrganizationEnvironmentsTest |
python | pypa__warehouse | tests/unit/admin/views/test_organizations.py | {
"start": 20953,
"end": 23577
} | class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_rename_not_found(self, db_request):
admin = UserFactory.create()
db_request.matchdict = {
"organization_id": "deadbeef-dead-beef-dead-beefdeadbeef"
}
db_request.params = {
"new_organization_name": "widget",
}
db_request.user = admin
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foo/bar/")
with pytest.raises(HTTPNotFound):
views.organization_rename(db_request)
@pytest.mark.usefixtures("_enable_organizations")
def test_rename(self, db_request):
admin = UserFactory.create()
organization = OrganizationFactory.create(name="example")
db_request.matchdict = {"organization_id": organization.id}
db_request.params = {
"new_organization_name": " widget ", # Test trimming whitespace
}
db_request.user = admin
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: f"/admin/organizations/{organization.id}/"
)
db_request.session.flash = pretend.call_recorder(lambda *a, **kw: None)
result = views.organization_rename(db_request)
assert db_request.session.flash.calls == [
pretend.call(
'"example" organization renamed "widget"',
queue="success",
),
]
assert result.status_code == 303
assert result.location == f"/admin/organizations/{organization.id}/"
@pytest.mark.usefixtures("_enable_organizations")
def test_rename_fails_on_conflict(self, db_request):
admin = UserFactory.create()
OrganizationFactory.create(name="widget")
organization = OrganizationFactory.create(name="example")
db_request.matchdict = {"organization_id": organization.id}
db_request.params = {
"new_organization_name": "widget",
}
db_request.user = admin
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: f"/admin/organizations/{organization.id}/"
)
db_request.session.flash = pretend.call_recorder(lambda *a, **kw: None)
result = views.organization_rename(db_request)
assert db_request.session.flash.calls == [
pretend.call(
'Organization name "widget" has been used',
queue="error",
),
]
assert result.status_code == 303
assert result.location == f"/admin/organizations/{organization.id}/"
| TestOrganizationActions |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_sdk_deprecations.py | {
"start": 191,
"end": 4927
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(
"sentry-api-0-organization-sdk-deprecations",
kwargs={"organization_id_or_slug": self.organization.slug},
)
def test_no_event_type(self) -> None:
response = self.client.get(self.url, format="json")
assert response.status_code == 400, response.content
assert response.data == {
"event_type": [ErrorDetail(string="This field is required.", code="required")],
}
def test_unknown_event_type(self) -> None:
response = self.client.get(
self.url,
{"event_type": "foo"},
format="json",
)
assert response.status_code == 400, response.content
assert response.data == {
"event_type": [
ErrorDetail(string='"foo" is not a valid choice.', code="invalid_choice")
],
}
def test_no_sdks_seen(self) -> None:
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
def test_sdk_non_semver_version(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.python",
sdk_version="something",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
def test_malformed_sdk_name(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="idontknow",
sdk_version="0.0.0",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
def test_sdk_with_no_minimum_version(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.idontknow",
sdk_version="0.0.0",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
def test_up_to_date_sdk(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.python",
sdk_version="2.24.1",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
def test_deprecated_sdk(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.python",
sdk_version="2.24.0",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {
"data": [
{
"projectId": str(self.project.id),
"minimumVersion": "2.24.1",
"sdkName": "sentry.python",
"sdkVersion": "2.24.0",
},
]
}
def test_mixed_sdks(self) -> None:
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.python",
sdk_version="2.24.0",
)
ProjectSDK.objects.create(
project=self.project,
event_type=EventType.PROFILE_CHUNK.value,
sdk_name="sentry.cocoa",
sdk_version="8.49.2",
)
response = self.client.get(
self.url,
{"event_type": "profile"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data == {"data": []}
| TestOrganizationSdkDeprecations |
python | html5lib__html5lib-python | doc/conf.py | {
"start": 3404,
"end": 4162
} | class ____(object):
"""Required for autodoc on readthedocs.org where you cannot build C extensions."""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return CExtMock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return CExtMock()
try:
import lxml # noqa
except ImportError:
sys.modules['lxml'] = CExtMock()
sys.modules['lxml.etree'] = CExtMock()
print("warning: lxml modules mocked.")
try:
import genshi # noqa
except ImportError:
sys.modules['genshi'] = CExtMock()
sys.modules['genshi.core'] = CExtMock()
print("warning: genshi modules mocked.")
| CExtMock |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 7346,
"end": 8799
} | class ____:
"""Data structure compatibility level."""
_version: int
def __init__(self) -> None:
msg = "it is not allowed to create a CompatLevel object"
raise TypeError(msg)
@staticmethod
def _with_version(version: int) -> CompatLevel:
compat_level = CompatLevel.__new__(CompatLevel)
compat_level._version = version
return compat_level
@staticmethod
def _newest() -> CompatLevel:
return CompatLevel._future1 # type: ignore[attr-defined]
@staticmethod
def newest() -> CompatLevel:
"""
Get the highest supported compatibility level.
.. warning::
Highest compatibility level is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
"""
issue_unstable_warning(
"using the highest compatibility level is considered unstable."
)
return CompatLevel._newest()
@staticmethod
def oldest() -> CompatLevel:
"""Get the most compatible level."""
return CompatLevel._compatible # type: ignore[attr-defined]
def __repr__(self) -> str:
return f"<{self.__class__.__module__}.{self.__class__.__qualname__}: {self._version}>"
CompatLevel._compatible = CompatLevel._with_version(0) # type: ignore[attr-defined]
CompatLevel._future1 = CompatLevel._with_version(1) # type: ignore[attr-defined]
| CompatLevel |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 10507,
"end": 12672
} | class ____(_StrCaching, _StringReferenceCaching, _ConstOpMixin, Value):
"""
A constant LLVM value.
"""
def __init__(self, typ, constant):
assert isinstance(typ, types.Type)
assert not isinstance(typ, types.VoidType)
self.type = typ
constant = typ.wrap_constant_value(constant)
self.constant = constant
def _to_string(self):
return '{0} {1}'.format(self.type, self.get_reference())
def _get_reference(self):
if self.constant is None:
val = self.type.null
elif self.constant is Undefined:
val = "undef"
elif isinstance(self.constant, bytearray):
val = 'c"{0}"'.format(_escape_string(self.constant))
else:
val = self.type.format_constant(self.constant)
return val
@classmethod
def literal_array(cls, elems):
"""
Construct a literal array constant made of the given members.
"""
tys = [el.type for el in elems]
if len(tys) == 0:
raise ValueError("need at least one element")
ty = tys[0]
for other in tys:
if ty != other:
raise TypeError("all elements must have the same type")
return cls(types.ArrayType(ty, len(elems)), elems)
@classmethod
def literal_struct(cls, elems, packed=False):
"""
Construct a literal structure constant made of the given members.
"""
tys = [el.type for el in elems]
return cls(types.LiteralStructType(tys, packed), elems)
@property
def addrspace(self):
if not isinstance(self.type, types.PointerType):
raise TypeError("Only pointer constant have address spaces")
return self.type.addrspace
def __eq__(self, other):
if isinstance(other, Constant):
return str(self) == str(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
def __repr__(self):
return "<ir.Constant type='%s' value=%r>" % (self.type, self.constant)
| Constant |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_contain_valid_email.py | {
"start": 1622,
"end": 11478
} | class ____(ColumnMapExpectation):
"""Expect values in given column to be valid email addresses."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"fail_case_1": ["a123@something", "a123@something.", "a123."],
"fail_case_2": ["aaaa.a123.co", "aaaa.a123.", "aaaa.a123.com"],
"fail_case_3": ["aaaa@a123.e", "aaaa@a123.a", "aaaa@a123.d"],
"fail_case_4": ["@a123.com", "@a123.io", "@a123.eu"],
"pass_case_1": [
"a123@something.com",
"vinod.km@something.au",
"this@better.work",
],
"pass_case_2": [
"example@website.dom",
"ex.ample@example.ex",
"great@expectations.email",
],
"valid_emails": [
"janedoe@company.org",
"someone123@stuff.net",
"mycompany@mycompany.com",
],
"bad_emails": ["Hello, world!", "Sophia", "this should fail"],
},
"tests": [
{
"title": "negative_test_for_no_domain_name",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "fail_case_1"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2],
"unexpected_list": [
"a123@something",
"a123@something.",
"a123.",
],
},
},
{
"title": "negative_test_for_no_at_symbol",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "fail_case_2"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2],
"unexpected_list": [
"aaaa.a123.co",
"aaaa.a123.",
"aaaa.a123.com",
],
},
},
{
"title": "negative_test_for_ending_with_one_character",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "fail_case_3"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2],
"unexpected_list": [
"aaaa@a123.e",
"aaaa@a123.a",
"aaaa@a123.d",
],
},
},
{
"title": "negative_test_for_emails_with_no_leading_string",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "fail_case_4"},
"out": {
"success": False,
},
},
{
"title": "pass_test_1",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "pass_case_1"},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "pass_test_2",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "pass_case_2"},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "valid_emails",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_emails"},
"out": {
"success": True,
},
},
{
"title": "invalid_emails",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "bad_emails"},
"out": {
"success": False,
"unexpected_index_list": [0, 1, 2],
"unexpected_list": [
"Hello, world!",
"Sophia",
"this should fail",
],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "column map expectation"],
"contributors": [ # Github
"@aworld1",
"@enagola",
"@spencerhardwick",
"@vinodkri1",
"@degulati",
"@ljohnston931",
"@rexboyce",
"@lodeous",
"@sophiarawlings",
"@vtdangg",
],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_email"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ()
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
# !!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
# !!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
# !!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToContainValidEmail().print_diagnostic_checklist()
| ExpectColumnValuesToContainValidEmail |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_event_details.py | {
"start": 480,
"end": 11518
} | class ____(APITestCase, SnubaTestCase, OccurrenceTestMixin):
def setUp(self) -> None:
super().setUp()
min_ago = before_now(minutes=1).isoformat()
two_min_ago = before_now(minutes=2).isoformat()
three_min_ago = before_now(minutes=3).isoformat()
self.login_as(user=self.user)
self.project = self.create_project()
self.project_2 = self.create_project()
self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": three_min_ago,
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"message": "very bad",
"timestamp": two_min_ago,
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "c" * 32,
"message": "very bad",
"timestamp": min_ago,
"fingerprint": ["group-2"],
},
project_id=self.project.id,
)
self.groups = list(Group.objects.all().order_by("id"))
def test_performance_flag(self) -> None:
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "a" * 32,
},
)
with self.feature(
{"organizations:discover-basic": False, "organizations:performance-view": True}
):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == "a" * 32
assert response.data["projectSlug"] == self.project.slug
def test_simple(self) -> None:
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "a" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == "a" * 32
assert response.data["projectSlug"] == self.project.slug
def test_simple_with_id(self) -> None:
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.id,
"event_id": "a" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == "a" * 32
assert response.data["projectSlug"] == self.project.slug
def test_simple_transaction(self) -> None:
min_ago = before_now(minutes=1).isoformat()
event = self.store_event(
data={
"event_id": "d" * 32,
"type": "transaction",
"transaction": "api.issue.delete",
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
"start_timestamp": before_now(minutes=1, seconds=5).isoformat(),
"timestamp": min_ago,
},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event.event_id,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200
assert response.data["id"] == "d" * 32
assert response.data["type"] == "transaction"
def test_no_access_missing_feature(self) -> None:
with self.feature({"organizations:discover-basic": False}):
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "a" * 32,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 404, response.content
def test_access_non_member_project(self) -> None:
# Add a new user to a project and then access events on project they are not part of.
member_user = self.create_user()
team = self.create_team(members=[member_user])
self.create_project(organization=self.organization, teams=[team])
# Enable open membership
self.organization.flags.allow_joinleave = True
self.organization.save()
self.login_as(member_user)
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "a" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
# When open membership is off, access should be denied to non owner users
self.organization.flags.allow_joinleave = False
self.organization.save()
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 404, response.content
def test_no_event(self) -> None:
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "d" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 404, response.content
def test_invalid_event_id(self) -> None:
with pytest.raises(NoReverseMatch):
reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "not-an-event",
},
)
def test_long_trace_description(self) -> None:
data = load_data("transaction")
data["event_id"] = "d" * 32
data["timestamp"] = before_now(minutes=1).isoformat()
data["start_timestamp"] = (before_now(minutes=1) - timedelta(seconds=5)).isoformat()
data["contexts"]["trace"]["description"] = "b" * 512
self.store_event(data=data, project_id=self.project.id)
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "d" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
trace = response.data["contexts"]["trace"]
original_trace = data["contexts"]["trace"]
assert trace["trace_id"] == original_trace["trace_id"]
assert trace["span_id"] == original_trace["span_id"]
assert trace["parent_span_id"] == original_trace["parent_span_id"]
assert trace["description"][:-3] in original_trace["description"]
def test_blank_fields(self) -> None:
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "a" * 32,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(
url,
data={"field": ["", " "], "statsPeriod": "24h"},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == "a" * 32
assert response.data["projectSlug"] == self.project.slug
def test_out_of_retention(self) -> None:
self.store_event(
data={
"event_id": "d" * 32,
"message": "oh no",
"timestamp": before_now(days=2).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": "d" * 32,
},
)
with self.options({"system.event-retention-days": 1}):
response = self.client.get(
url,
format="json",
)
assert response.status_code == 404, response.content
def test_generic_event(self) -> None:
occurrence, _ = self.process_occurrence(
project_id=self.project.id,
event_data={
"level": "info",
},
)
url = reverse(
"sentry-api-0-organization-event-details",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": occurrence.event_id,
},
)
with self.feature("organizations:discover-basic"):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == occurrence.event_id
assert response.data["projectSlug"] == self.project.slug
assert response.data["occurrence"] is not None
assert response.data["occurrence"]["id"] == occurrence.id
| OrganizationEventDetailsEndpointTest |
python | sympy__sympy | sympy/tensor/array/expressions/from_array_to_indexed.py | {
"start": 581,
"end": 3936
} | class ____:
def __init__(self):
self.count_dummies = 0
def do_convert(self, expr, indices):
if isinstance(expr, ArrayTensorProduct):
cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args]))
indices_grp = [indices[cumul[i]:cumul[i+1]] for i in range(len(expr.args))]
return Mul.fromiter(self.do_convert(arg, ind) for arg, ind in zip(expr.args, indices_grp))
if isinstance(expr, ArrayContraction):
new_indices = [None for i in range(get_rank(expr.expr))]
limits = []
bottom_shape = get_shape(expr.expr)
for contraction_index_grp in expr.contraction_indices:
d = Dummy(f"d{self.count_dummies}")
self.count_dummies += 1
dim = bottom_shape[contraction_index_grp[0]]
limits.append((d, 0, dim-1))
for i in contraction_index_grp:
new_indices[i] = d
j = 0
for i in range(len(new_indices)):
if new_indices[i] is None:
new_indices[i] = indices[j]
j += 1
newexpr = self.do_convert(expr.expr, new_indices)
return Sum(newexpr, *limits)
if isinstance(expr, ArrayDiagonal):
new_indices = [None for i in range(get_rank(expr.expr))]
ind_pos = expr._push_indices_down(expr.diagonal_indices, list(range(len(indices))), get_rank(expr))
for i, index in zip(ind_pos, indices):
if isinstance(i, collections.abc.Iterable):
for j in i:
new_indices[j] = index
else:
new_indices[i] = index
newexpr = self.do_convert(expr.expr, new_indices)
return newexpr
if isinstance(expr, PermuteDims):
permuted_indices = _apply_permutation_to_list(expr.permutation, indices)
return self.do_convert(expr.expr, permuted_indices)
if isinstance(expr, ArrayAdd):
return Add.fromiter(self.do_convert(arg, indices) for arg in expr.args)
if isinstance(expr, _ArrayExpr):
return expr.__getitem__(tuple(indices))
if isinstance(expr, ArrayElementwiseApplyFunc):
return expr.function(self.do_convert(expr.expr, indices))
if isinstance(expr, Reshape):
shape_up = expr.shape
shape_down = get_shape(expr.expr)
cumul = list(accumulate([1] + list(reversed(shape_up)), operator.mul))
one_index = Add.fromiter(i*s for i, s in zip(reversed(indices), cumul))
dest_indices = [None for _ in shape_down]
c = 1
for i, e in enumerate(reversed(shape_down)):
if c == 1:
if i == len(shape_down) - 1:
dest_indices[i] = one_index
else:
dest_indices[i] = one_index % e
elif i == len(shape_down) - 1:
dest_indices[i] = one_index // c
else:
dest_indices[i] = one_index // c % e
c *= e
dest_indices.reverse()
return self.do_convert(expr.expr, dest_indices)
return _get_array_element_or_slice(expr, indices)
| _ConvertArrayToIndexed |
python | getsentry__sentry | src/sentry/identity/base.py | {
"start": 362,
"end": 2325
} | class ____(PipelineProvider["IdentityPipeline"], abc.ABC):
"""
A provider indicates how identity authenticate should happen for a given service.
"""
def __init__(self, **config):
super().__init__()
self.config = config
self.logger = logging.getLogger(f"sentry.identity.{self.key}")
def build_identity(self, state):
"""
Return a mapping containing the identity information.
- ``state`` is the resulting data captured by the pipeline
>>> {
>>> "id": "foo@example.com",
>>> "email": "foo@example.com",
>>> "name": "Foo Bar",
>>> "scopes": ['email', ...],
>>> "data": { ... },
>>> }
The ``id`` key is required.
The ``id`` may be passed in as a ``MigratingIdentityId`` should the
the id key be migrating from one value to another and have multiple
lookup values.
If the identity can not be constructed an ``IdentityNotValid`` error
should be raised.
"""
raise NotImplementedError
def update_identity(self, new_data, current_data):
"""
When re-authenticating with a provider, the identity data may need to
be mutated based on the previous state. An example of this is Google,
which will not return a `refresh_token` unless the user explicitly
goes through an approval process.
Return the new state which should be used for an identity.
"""
return new_data
def refresh_identity(self, identity: Identity | RpcIdentity, **kwargs: Any) -> None:
"""
Updates the AuthIdentity with any changes from upstream. The primary
example of a change would be signalling this identity is no longer
valid.
If the identity is no longer valid an ``IdentityNotValid`` error should
be raised.
"""
raise NotImplementedError
| Provider |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.