language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__pytype | pytype/tests/test_dict2.py | {
"start": 68,
"end": 4250
} | class ____(test_base.BaseTest):
"""Tests for dictionaries."""
def test_filtered_getitem(self):
ty = self.Infer("""
from typing import Union
MAP = {0: "foo"}
def foo(x: Union[int, None]):
if x is not None:
return MAP[x]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Optional, Union
MAP = ... # type: Dict[int, str]
def foo(x: Union[int, None]) -> Optional[str]: ...
""",
)
def test_object_in_dict(self):
self.CheckWithErrors("""
from typing import Any, Dict
def objectIsStr() -> Dict[str, Any]:
return {object(): ""} # bad-return-type
""")
def test_big_concrete_dict(self):
# Test that we don't timeout.
self.CheckWithErrors("""
from typing import Dict, Tuple, Union
# A concrete dictionary with lots of concrete keys and a complicated
# value type.
d = {}
ValueType = Dict[Union[str, int], Union[str, int]]
v = ... # type: ValueType
d['a'] = v
d['b'] = v
d['c'] = v
d['d'] = v
d['e'] = v
d[('a', None)] = v
d[('b', None)] = v
d[('c', None)] = v
d[('d', None)] = v
d[('e', None)] = v
def f() -> Dict[Union[str, Tuple[str, None]], ValueType]:
return d
def g() -> Dict[int, int]:
return d # bad-return-type
""")
def test_dict_of_tuple(self):
# utils.deep_variable_product(group_dict) generates a lot of combinations.
# Test that we finish checking this code in a reasonable amount of time.
self.Check("""
from typing import Dict, Tuple
def iter_equality_constraints(op):
yield (op, 0 if __random__ else __any_object__)
def get_equality_groups(ops) -> Dict[Tuple, Tuple]:
group_dict = {}
for op in ops:
for a0 in iter_equality_constraints(op):
group_dict[a0] = a0
group_dict[__any_object__] = a0
return group_dict
""")
def test_recursion(self):
# Regression test for code that caused a RecursionError in STORE_SUBSCR.
self.Check("""
from typing import Any, Dict
def convert(d: Dict[Any, Any]):
keys = ['foo', 'bar']
for key in keys:
if key not in d:
d[key + '_suffix1'] = {}
if key + '_suffix2' in d:
d[key + '_suffix1']['suffix2'] = d[key + '_suffix2']
if key + '_suffix3' in d:
d[key + '_suffix1']['suffix3'] = d[key + '_suffix3']
""")
def test_union(self):
ty, _ = self.InferWithErrors("""
from typing import Dict
a = {'a': 1} | {'b': 2}
b = {'a': 1}
b |= {1: 'a'}
c: Dict[str, int] = {'a': 1} | {1: 'a'} # annotation-type-mismatch
d = {}
d |= (('1', '2'), ('3', '4'))
e = {}
e |= [(1, 2), (3, '4')]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
a: Dict[str, int]
b: Dict[Union[str, int], Union[str, int]]
c: Dict[str, int]
d: Dict[str, str]
e: Dict[int, Union[int, str]]
""",
)
def test_reverse_views(self):
self.Check("""
x = {'a': 'b'}
print(reversed(x.keys()))
print(reversed(x.values()))
print(reversed(x.items()))
""")
def test_does_not_match_sequence(self):
self.CheckWithErrors("""
from typing import Sequence
x: Sequence[str] = {1: 'a'} # annotation-type-mismatch
y: Sequence[str] = {'a': 1} # annotation-type-mismatch
""")
def test_bad_update(self):
self.CheckWithErrors("""
d = {}
d.update(1) # wrong-arg-types
""")
def test_bad_unpack(self):
self.CheckWithErrors("""
lst = [3, 4]
def f(**kwargs):
pass
f(**lst) # wrong-arg-types
""")
def test_update_multiple_types(self):
self.Check("""
def f(**kwargs):
kwargs.update(a=0, b='1')
""")
def test_any(self):
self.Check("""
from typing import Dict
def do_something(d: Dict) -> None:
d['a'] = 0
d['b'] = d['b'].split(':')
""")
if __name__ == "__main__":
test_base.main()
| DictTest |
python | doocs__leetcode | solution/0300-0399/0334.Increasing Triplet Subsequence/Solution.py | {
"start": 0,
"end": 296
} | class ____:
def increasingTriplet(self, nums: List[int]) -> bool:
mi, mid = inf, inf
for num in nums:
if num > mid:
return True
if num <= mi:
mi = num
else:
mid = num
return False
| Solution |
python | apache__airflow | devel-common/src/sphinx_exts/operators_and_hooks_ref.py | {
"start": 18959,
"end": 19289
} | class ____(BaseJinjaReferenceDirective):
"""Generate list of deferrable operators"""
def render_content(self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR):
return _render_deferrable_operator_content(
header_separator=header_separator,
)
| DeferrableOperatorDirective |
python | Netflix__metaflow | metaflow/task.py | {
"start": 1163,
"end": 41967
} | class ____(object):
"""
MetaflowTask prepares a Flow instance for execution of a single step.
"""
def __init__(
self,
flow,
flow_datastore,
metadata,
environment,
console_logger,
event_logger,
monitor,
ubf_context,
orig_flow_datastore=None,
spin_artifacts=None,
):
self.flow = flow
self.flow_datastore = flow_datastore
self.metadata = metadata
self.environment = environment
self.console_logger = console_logger
self.event_logger = event_logger
self.monitor = monitor
self.ubf_context = ubf_context
self.orig_flow_datastore = orig_flow_datastore
self.spin_artifacts = spin_artifacts
def _exec_step_function(self, step_function, orig_step_func, input_obj=None):
wrappers_stack = []
wrapped_func = None
# Will set to non-Falsy if we need to fake calling `self.next`
# This is used when skipping the step.
# If a dictionary, it will
# contain the arguments to pass to `self.next`. If
# True, it means we are using whatever the usual
# arguments to `self.next` are for this step.
fake_next_call_args = False
raised_exception = None
had_raised_exception = False
# If we have wrappers w1, w2 and w3, we need to execute
# - w3_pre
# - w2_pre
# - w1_pre
# - step_function
# - w1_post
# - w2_post
# - w3_post
# in that order. We do this by maintaining a stack of generators.
# Note that if any of the pre functions returns a function, we execute that
# instead of the rest of the inside part. This is useful if you want to create
# no-op function for example.
for w in reversed(orig_step_func.wrappers):
wrapped_func = w.pre_step(orig_step_func.name, self.flow, input_obj)
wrappers_stack.append(w)
if w.skip_step:
# We are not going to run anything so we will have to fake calling
# next.
fake_next_call_args = w.skip_step
break
if wrapped_func:
break # We have nothing left to do since we now execute the
# wrapped function
# Else, we continue down the list of wrappers
try:
# fake_next_call is used here to also indicate that the step was skipped
# so we do not execute anything.
if not fake_next_call_args:
if input_obj is None:
if wrapped_func:
fake_next_call_args = wrapped_func(self.flow)
else:
step_function()
else:
if wrapped_func:
fake_next_call_args = wrapped_func(self.flow, input_obj)
else:
step_function(input_obj)
except Exception as ex:
raised_exception = ex
had_raised_exception = True
# We back out of the stack of generators
for w in reversed(wrappers_stack):
try:
r = w.post_step(orig_step_func.name, self.flow, raised_exception)
except Exception as ex:
r = ex
if r is None:
raised_exception = None
elif isinstance(r, Exception):
raised_exception = r
elif isinstance(r, tuple):
if len(r) == 2:
raised_exception, fake_next_call_args = r
else:
# The last argument is an exception to be re-raised. Used in
# user_step_decorator's post_step
raise r[2]
else:
raise RuntimeError(
"Invalid return value from a UserStepDecorator. Expected an"
"exception or an exception and arguments for self.next, got: %s" % r
)
if raised_exception:
# We have an exception that we need to propagate
raise raised_exception
if fake_next_call_args or had_raised_exception:
# We want to override the next call or we caught an exception (in which
# case the regular step code didn't call self.next). In this case,
# we need to set the transition variables
# properly. We call the next function as needed
# We also do this in case we want to gobble the exception.
graph_node = self.flow._graph[orig_step_func.name]
out_funcs = [getattr(self.flow, f) for f in graph_node.out_funcs]
if out_funcs:
self.flow._transition = None
if isinstance(fake_next_call_args, dict) and fake_next_call_args:
# Not an empty dictionary -- we use this as arguments for the next
# call
self.flow.next(*out_funcs, **fake_next_call_args)
elif (
fake_next_call_args == True
or fake_next_call_args == {}
or had_raised_exception
):
# We need to extract things from the self.next. This is not possible
# in the case where there was a num_parallel.
if graph_node.parallel_foreach:
raise RuntimeError(
"Skipping a parallel foreach step without providing "
"the arguments to the self.next call is not supported. "
)
if graph_node.foreach_param:
self.flow.next(*out_funcs, foreach=graph_node.foreach_param)
else:
self.flow.next(*out_funcs)
else:
raise RuntimeError(
"Invalid value passed to self.next; expected "
" bool of a dictionary; got: %s" % fake_next_call_args
)
def _init_parameters(self, parameter_ds, passdown=True):
cls = self.flow.__class__
def _set_cls_var(_, __):
raise AttributeError(
"Flow level attributes and Parameters are not modifiable"
)
def set_as_parameter(name, value):
if callable(value):
setattr(cls, name, property(fget=value, fset=_set_cls_var))
else:
setattr(
cls,
name,
property(fget=lambda _, val=value: val, fset=_set_cls_var),
)
# overwrite Parameters in the flow object
all_vars = []
for var, param in self.flow._get_parameters():
# make the parameter a read-only property
# note x=x binds the current value of x to the closure
def property_setter(
_,
param=param,
var=var,
parameter_ds=parameter_ds,
):
v = param.load_parameter(parameter_ds[var])
# Reset the parameter to just return the value now that we have loaded it
set_as_parameter(var, v)
return v
set_as_parameter(var, property_setter)
all_vars.append(var)
param_only_vars = list(all_vars)
# make class-level values read-only to be more consistent across steps in a flow
# they are also only persisted once, so we similarly pass them down if
# required
for var in dir(cls):
if var[0] == "_" or var in cls._NON_PARAMETERS or var in all_vars:
continue
val = getattr(cls, var)
# Exclude methods, properties and other classes
if isinstance(val, (MethodType, FunctionType, property, type)):
continue
set_as_parameter(var, val)
all_vars.append(var)
# We also passdown _graph_info through the entire graph
set_as_parameter(
"_graph_info",
lambda _, parameter_ds=parameter_ds: parameter_ds["_graph_info"],
)
all_vars.append("_graph_info")
if passdown:
self.flow._datastore.passdown_partial(parameter_ds, all_vars)
return param_only_vars
def _init_data(self, run_id, join_type, input_paths):
# We prefer to use the parallelized version to initialize datastores
# (via TaskDataStoreSet) only with more than 4 datastores, because
# the baseline overhead of using the set is ~1.5s and each datastore
# init takes ~200-300ms when run sequentially.
if len(input_paths) > 4:
prefetch_data_artifacts = None
if join_type and join_type == "foreach":
# Prefetch 'foreach' related artifacts to improve time taken by
# _init_foreach.
prefetch_data_artifacts = [
"_iteration_stack",
"_foreach_stack",
"_foreach_num_splits",
"_foreach_var",
]
# Note: Specify `pathspecs` while creating the datastore set to
# guarantee strong consistency and guard against missing input.
datastore_set = TaskDataStoreSet(
self.flow_datastore,
run_id,
pathspecs=input_paths,
prefetch_data_artifacts=prefetch_data_artifacts,
join_type=join_type,
orig_flow_datastore=self.orig_flow_datastore,
spin_artifacts=self.spin_artifacts,
)
ds_list = [ds for ds in datastore_set]
if len(ds_list) != len(input_paths):
raise MetaflowDataMissing(
"Some input datastores are missing. "
"Expected: %d Actual: %d" % (len(input_paths), len(ds_list))
)
else:
# initialize directly in the single input case.
ds_list = []
for input_path in input_paths:
parts = input_path.split("/")
if len(parts) == 3:
run_id, step_name, task_id = parts
attempt = None
else:
run_id, step_name, task_id, attempt = parts
attempt = int(attempt)
ds_list.append(
self.flow_datastore.get_task_datastore(
run_id,
step_name,
task_id,
attempt=attempt,
join_type=join_type,
orig_flow_datastore=self.orig_flow_datastore,
spin_artifacts=self.spin_artifacts,
)
)
from_start("MetaflowTask: got datastore for input path %s" % input_path)
if not ds_list:
# this guards against errors in input paths
raise MetaflowDataMissing(
"Input paths *%s* resolved to zero inputs" % ",".join(input_paths)
)
return ds_list
def _init_foreach(self, step_name, join_type, inputs, split_index):
# these variables are only set by the split step in the output
# data. They don't need to be accessible in the flow.
self.flow._foreach_var = None
self.flow._foreach_num_splits = None
# There are three cases that can alter the foreach state:
# 1) start - initialize an empty foreach stack
# 2) join - pop the topmost frame from the stack
# 3) step following a split - push a new frame in the stack
# We have a non-modifying case (case 4)) where we propagate the
# foreach-stack information to all tasks in the foreach. This is
# then used later to write the foreach-stack metadata for that task
# case 1) - reset the stack
if step_name == "start":
self.flow._foreach_stack = []
# case 2) - this is a join step
elif join_type:
# assert the lineage of incoming branches
def lineage():
for i in inputs:
if join_type == "foreach":
top = i["_foreach_stack"][-1]
bottom = i["_foreach_stack"][:-1]
# the topmost indices and values in the stack are
# all different naturally, so ignore them in the
# assertion
yield bottom + [top._replace(index=0, value=0)]
else:
yield i["_foreach_stack"]
if not all_equal(lineage()):
raise MetaflowInternalError(
"Step *%s* tried to join branches "
"whose lineages don't match." % step_name
)
# assert that none of the inputs are splits - we don't
# allow empty `foreach`s (joins immediately following splits)
if any(not i.is_none("_foreach_var") for i in inputs):
raise MetaflowInternalError(
"Step *%s* tries to join a foreach "
"split with no intermediate steps." % step_name
)
inp = inputs[0]
if join_type == "foreach":
# Make sure that the join got all splits as its inputs.
# Datastore.resolve() leaves out all undone tasks, so if
# something strange happened upstream, the inputs list
# may not contain all inputs which should raise an exception
stack = inp["_foreach_stack"]
if stack[-1].num_splits and len(inputs) != stack[-1].num_splits:
raise MetaflowDataMissing(
"Foreach join *%s* expected %d "
"splits but only %d inputs were "
"found" % (step_name, stack[-1].num_splits, len(inputs))
)
# foreach-join pops the topmost frame from the stack
self.flow._foreach_stack = stack[:-1]
else:
# a non-foreach join doesn't change the stack
self.flow._foreach_stack = inp["_foreach_stack"]
# case 3) - our parent was a split. Initialize a new foreach frame.
elif not inputs[0].is_none("_foreach_var"):
if len(inputs) != 1:
raise MetaflowInternalError(
"Step *%s* got multiple inputs "
"although it follows a split step." % step_name
)
if self.ubf_context != UBF_CONTROL and split_index is None:
raise MetaflowInternalError(
"Step *%s* follows a split step "
"but no split_index is "
"specified." % step_name
)
split_value = (
inputs[0]["_foreach_values"][split_index]
if not inputs[0].is_none("_foreach_values")
else None
)
# push a new index after a split to the stack
frame = ForeachFrame(
step_name,
inputs[0]["_foreach_var"],
inputs[0]["_foreach_num_splits"],
split_index,
split_value,
)
stack = inputs[0]["_foreach_stack"]
stack.append(frame)
self.flow._foreach_stack = stack
# case 4) - propagate in the foreach nest
elif "_foreach_stack" in inputs[0]:
self.flow._foreach_stack = inputs[0]["_foreach_stack"]
def _init_iteration(self, step_name, inputs, is_recursive_step):
# We track the iteration "stack" for loops. At this time, we
# only support one type of "looping" which is a recursive step but
# this can generalize to arbitrary well-scoped loops in the future.
# _iteration_stack will contain the iteration count for each loop
# level. Currently, there will be only no elements (no loops) or
# a single element (a single recursive step).
# We just need to determine the rules to add a new looping level,
# increment the looping level or pop the looping level. In our
# current support for only recursive steps, this is pretty straightforward:
# 1) if is_recursive_step:
# - we are entering a loop -- we are either entering for the first time
# or we are continuing the loop. Note that a recursive step CANNOT
# be a join step so there is always a single input
# 1a) If inputs[0]["_iteration_stack"] contains an element, we are looping
# so we increment the count
# 1b) If inputs[0]["_iteration_stack"] is empty, this is the first time we
# are entering the loop so we set the iteration count to 0
# 2) if it is not a recursive step, we need to determine if this is the step
# *after* the recursive step. The easiest way to determine that is to
# look at all inputs (there can be multiple in case of a join) and pop
# _iteration_stack if it is set. However, since we know that non recursive
# steps are *never* part of an iteration, we can simplify and just set it
# to [] without even checking anything. We will have to revisit this if/when
# more complex loop structures are supported.
# Note that just like _foreach_stack, we need to set _iteration_stack to *something*
# so that it doesn't get clobbered weirdly by merge_artifacts.
if is_recursive_step:
# Case 1)
if len(inputs) != 1:
raise MetaflowInternalError(
"Step *%s* is a recursive step but got multiple inputs." % step_name
)
inp = inputs[0]
if "_iteration_stack" not in inp or not inp["_iteration_stack"]:
# Case 1b)
self.flow._iteration_stack = [0]
else:
# Case 1a)
stack = inp["_iteration_stack"]
stack[-1] += 1
self.flow._iteration_stack = stack
else:
# Case 2)
self.flow._iteration_stack = []
def _clone_flow(self, datastore):
x = self.flow.__class__(use_cli=False)
x._set_datastore(datastore)
return x
def clone_only(
self,
step_name,
run_id,
task_id,
clone_origin_task,
retry_count,
):
if not clone_origin_task:
raise MetaflowInternalError(
"task.clone_only needs a valid clone_origin_task value."
)
origin_run_id, _, origin_task_id = clone_origin_task.split("/")
# Update system logger and monitor context
# We also pass this context as part of the task payload to support implementations that
# can't access the context directly
task_payload = {
"run_id": run_id,
"step_name": step_name,
"task_id": task_id,
"retry_count": retry_count,
"project_name": current.get("project_name"),
"branch_name": current.get("branch_name"),
"is_user_branch": current.get("is_user_branch"),
"is_production": current.get("is_production"),
"project_flow_name": current.get("project_flow_name"),
"origin_run_id": origin_run_id,
"origin_task_id": origin_task_id,
}
msg = "Cloning task from {}/{}/{}/{} to {}/{}/{}/{}".format(
self.flow.name,
origin_run_id,
step_name,
origin_task_id,
self.flow.name,
run_id,
step_name,
task_id,
)
with _system_monitor.count("metaflow.task.clone"):
_system_logger.log_event(
level="info",
module="metaflow.task",
name="clone",
payload={**task_payload, "msg": msg},
)
# If we actually have to do the clone ourselves, proceed...
clone_task_helper(
self.flow.name,
origin_run_id,
run_id,
step_name,
origin_task_id,
task_id,
self.flow_datastore,
self.metadata,
attempt_id=retry_count,
)
def _finalize_control_task(self):
# Update `_transition` which is expected by the NativeRuntime.
step_name = self.flow._current_step
next_steps = self.flow._graph[step_name].out_funcs
self.flow._transition = (next_steps, None)
if self.flow._task_ok:
# Throw an error if `_control_mapper_tasks` isn't populated.
mapper_tasks = self.flow._control_mapper_tasks
if not mapper_tasks:
msg = (
"Step *{step}* has a control task which didn't "
"specify the artifact *_control_mapper_tasks* for "
"the subsequent *{join}* step."
)
raise MetaflowInternalError(
msg.format(step=step_name, join=next_steps[0])
)
elif not (
isinstance(mapper_tasks, list)
and isinstance(mapper_tasks[0], unicode_type)
):
msg = (
"Step *{step}* has a control task which didn't "
"specify the artifact *_control_mapper_tasks* as a "
"list of strings but instead specified it as {typ} "
"with elements of {elem_typ}."
)
raise MetaflowInternalError(
msg.format(
step=step_name,
typ=type(mapper_tasks),
elem_typ=type(mapper_tasks[0]),
)
)
def run_step(
self,
step_name,
run_id,
task_id,
origin_run_id,
input_paths,
split_index,
retry_count,
max_user_code_retries,
whitelist_decorators=None,
persist=True,
):
if run_id and task_id:
self.metadata.register_run_id(run_id)
self.metadata.register_task_id(run_id, step_name, task_id, retry_count)
else:
raise MetaflowInternalError(
"task.run_step needs a valid run_id and task_id"
)
if retry_count >= MAX_ATTEMPTS:
# any results with an attempt ID >= MAX_ATTEMPTS will be ignored
# by datastore, so running a task with such a retry_could would
# be pointless and dangerous
raise MetaflowInternalError(
"Too many task attempts (%d)! MAX_ATTEMPTS exceeded." % retry_count
)
metadata_tags = ["attempt_id:{0}".format(retry_count)]
metadata = [
MetaDatum(
field="attempt",
value=str(retry_count),
type="attempt",
tags=metadata_tags,
),
MetaDatum(
field="origin-run-id",
value=str(origin_run_id),
type="origin-run-id",
tags=metadata_tags,
),
MetaDatum(
field="ds-type",
value=self.flow_datastore.TYPE,
type="ds-type",
tags=metadata_tags,
),
MetaDatum(
field="ds-root",
value=self.flow_datastore.datastore_root,
type="ds-root",
tags=metadata_tags,
),
]
trace_id = get_trace_id()
if trace_id:
metadata.append(
MetaDatum(
field="otel-trace-id",
value=trace_id,
type="trace-id",
tags=metadata_tags,
)
)
step_func = getattr(self.flow, step_name)
decorators = step_func.decorators
if self.orig_flow_datastore:
# We filter only the whitelisted decorators in case of spin step.
decorators = (
[]
if not whitelist_decorators
else [deco for deco in decorators if deco.name in whitelist_decorators]
)
from_start("MetaflowTask: decorators initialized")
node = self.flow._graph[step_name]
join_type = None
if node.type == "join":
join_type = self.flow._graph[node.split_parents[-1]].type
# 1. initialize output datastore
output = self.flow_datastore.get_task_datastore(
run_id, step_name, task_id, attempt=retry_count, mode="w", persist=persist
)
output.init_task()
from_start("MetaflowTask: output datastore initialized")
if input_paths:
# 2. initialize input datastores
inputs = self._init_data(run_id, join_type, input_paths)
from_start("MetaflowTask: input datastores initialized")
# 3. initialize foreach state
self._init_foreach(step_name, join_type, inputs, split_index)
from_start("MetaflowTask: foreach state initialized")
# 4. initialize the iteration state
is_recursive_step = (
node.type == "split-switch" and step_name in node.out_funcs
)
self._init_iteration(step_name, inputs, is_recursive_step)
# Add foreach stack to metadata of the task
foreach_stack = (
self.flow._foreach_stack
if hasattr(self.flow, "_foreach_stack") and self.flow._foreach_stack
else []
)
foreach_stack_formatted = []
current_foreach_path_length = 0
for frame in foreach_stack:
if not (frame.var and frame.value):
break
foreach_step = "%s=%s" % (frame.var, frame.value)
if (
current_foreach_path_length + len(foreach_step)
> MAX_FOREACH_PATH_LENGTH
):
break
current_foreach_path_length += len(foreach_step)
foreach_stack_formatted.append(foreach_step)
if foreach_stack_formatted:
metadata.append(
MetaDatum(
field="foreach-stack",
value=foreach_stack_formatted,
type="foreach-stack",
tags=metadata_tags,
)
)
# Add runtime dag information to the metadata of the task
foreach_execution_path = ",".join(
[
"{}:{}".format(foreach_frame.step, foreach_frame.index)
for foreach_frame in foreach_stack
]
)
if foreach_execution_path:
metadata.extend(
[
MetaDatum(
field="foreach-execution-path",
value=foreach_execution_path,
type="foreach-execution-path",
tags=metadata_tags,
),
]
)
from_start("MetaflowTask: finished input processing")
self.metadata.register_metadata(
run_id,
step_name,
task_id,
metadata,
)
# 4. initialize the current singleton
current._set_env(
flow=self.flow,
run_id=run_id,
step_name=step_name,
task_id=task_id,
retry_count=retry_count,
origin_run_id=origin_run_id,
namespace=resolve_identity(),
username=get_username(),
metadata_str=self.metadata.metadata_str(),
is_running=True,
tags=self.metadata.sticky_tags,
)
# 5. run task
output.save_metadata(
{
"task_begin": {
"code_package_metadata": os.environ.get(
"METAFLOW_CODE_METADATA", ""
),
"code_package_sha": os.environ.get("METAFLOW_CODE_SHA"),
"code_package_ds": os.environ.get("METAFLOW_CODE_DS"),
"code_package_url": os.environ.get("METAFLOW_CODE_URL"),
"retry_count": retry_count,
}
}
)
# 6. Update system logger and monitor context
# We also pass this context as part of the task payload to support implementations that
# can't access the context directly
task_payload = {
"run_id": run_id,
"step_name": step_name,
"task_id": task_id,
"retry_count": retry_count,
"project_name": current.get("project_name"),
"branch_name": current.get("branch_name"),
"is_user_branch": current.get("is_user_branch"),
"is_production": current.get("is_production"),
"project_flow_name": current.get("project_flow_name"),
"trace_id": trace_id or None,
}
from_start("MetaflowTask: task metadata initialized")
start = time.time()
self.metadata.start_task_heartbeat(self.flow.name, run_id, step_name, task_id)
from_start("MetaflowTask: heartbeat started")
with self.monitor.measure("metaflow.task.duration"):
try:
with self.monitor.count("metaflow.task.start"):
_system_logger.log_event(
level="info",
module="metaflow.task",
name="start",
payload={**task_payload, "msg": "Task started"},
)
self.flow._current_step = step_name
self.flow._success = False
self.flow._task_ok = None
self.flow._exception = None
# Note: All internal flow attributes (ie: non-user artifacts)
# should either be set prior to running the user code or listed in
# FlowSpec._EPHEMERAL to allow for proper merging/importing of
# user artifacts in the user's step code.
if join_type:
# Join step:
# Ensure that we have the right number of inputs.
if join_type != "foreach":
# Find the corresponding split node from the graph.
split_node = self.flow._graph[node.split_parents[-1]]
# The number of expected inputs is the number of branches
# from that split -- we can't use in_funcs because there may
# be more due to split-switch branches that all converge here.
expected_inputs = len(split_node.out_funcs)
if len(inputs) != expected_inputs:
raise MetaflowDataMissing(
"Join *%s* expected %d inputs but only %d inputs "
"were found" % (step_name, expected_inputs, len(inputs))
)
# Multiple input contexts are passed in as an argument
# to the step function.
input_obj = Inputs(self._clone_flow(inp) for inp in inputs)
self.flow._set_datastore(output)
# initialize parameters (if they exist)
# We take Parameter values from the first input,
# which is always safe since parameters are read-only
current._update_env(
{
"parameter_names": self._init_parameters(
inputs[0], passdown=True
),
"graph_info": self.flow._graph_info,
}
)
else:
# Linear step:
# We are running with a single input context.
# The context is embedded in the flow.
if len(inputs) > 1:
# This should be captured by static checking but
# let's assert this again
raise MetaflowInternalError(
"Step *%s* is not a join "
"step but it gets multiple "
"inputs." % step_name
)
self.flow._set_datastore(inputs[0])
if input_paths:
# initialize parameters (if they exist)
# We take Parameter values from the first input,
# which is always safe since parameters are read-only
current._update_env(
{
"parameter_names": self._init_parameters(
inputs[0], passdown=False
),
"graph_info": self.flow._graph_info,
}
)
from_start("MetaflowTask: before pre-step decorators")
for deco in decorators:
if deco.name == "card" and self.orig_flow_datastore:
# if spin step and card decorator, pass spin metadata
metadata = [m for m in METADATA_PROVIDERS if m.TYPE == "spin"][
0
](self.environment, self.flow, self.event_logger, self.monitor)
else:
metadata = self.metadata
deco.task_pre_step(
step_name,
output,
metadata,
run_id,
task_id,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries,
self.ubf_context,
inputs,
)
orig_step_func = step_func
for deco in decorators:
# decorators can actually decorate the step function,
# or they can replace it altogether. This functionality
# is used e.g. by catch_decorator which switches to a
# fallback code if the user code has failed too many
# times.
step_func = deco.task_decorate(
step_func,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries,
self.ubf_context,
)
from_start("MetaflowTask: finished decorator processing")
if join_type:
self._exec_step_function(step_func, orig_step_func, input_obj)
else:
self._exec_step_function(step_func, orig_step_func)
from_start("MetaflowTask: step function executed")
for deco in decorators:
deco.task_post_step(
step_name,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries,
)
self.flow._task_ok = True
self.flow._success = True
except Exception as ex:
with self.monitor.count("metaflow.task.exception"):
_system_logger.log_event(
level="error",
module="metaflow.task",
name="exception",
payload={**task_payload, "msg": traceback.format_exc()},
)
exception_handled = False
for deco in decorators:
res = deco.task_exception(
ex,
step_name,
self.flow,
self.flow._graph,
retry_count,
max_user_code_retries,
)
exception_handled = bool(res) or exception_handled
if exception_handled:
self.flow._task_ok = True
else:
self.flow._task_ok = False
self.flow._exception = MetaflowExceptionWrapper(ex)
print("%s failed:" % self.flow, file=sys.stderr)
raise
finally:
from_start("MetaflowTask: decorators finalized")
if self.ubf_context == UBF_CONTROL:
self._finalize_control_task()
# Emit metrics to logger/monitor sidecar implementations
with self.monitor.count("metaflow.task.end"):
_system_logger.log_event(
level="info",
module="metaflow.task",
name="end",
payload={**task_payload, "msg": "Task ended"},
)
try:
# persisting might fail due to unpicklable artifacts.
output.persist(self.flow)
except Exception as ex:
self.flow._task_ok = False
raise ex
finally:
# The attempt_ok metadata is used to determine task status so it is important
# we ensure that it is written even in case of preceding failures.
# f.ex. failing to serialize artifacts leads to a non-zero exit code for the process,
# even if user code finishes successfully. Flow execution will not continue due to the exit,
# so arguably we should mark the task as failed.
attempt_ok = str(bool(self.flow._task_ok))
self.metadata.register_metadata(
run_id,
step_name,
task_id,
[
MetaDatum(
field="attempt_ok",
value=attempt_ok,
type="internal_attempt_status",
tags=["attempt_id:{0}".format(retry_count)],
),
],
)
output.save_metadata({"task_end": {}})
from_start("MetaflowTask: output persisted")
# this writes a success marker indicating that the
# "transaction" is done
output.done()
# final decorator hook: The task results are now
# queryable through the client API / datastore
for deco in decorators:
deco.task_finished(
step_name,
self.flow,
self.flow._graph,
self.flow._task_ok,
retry_count,
max_user_code_retries,
)
# terminate side cars
self.metadata.stop_heartbeat()
# Task duration consists of the time taken to run the task as well as the time taken to
# persist the task metadata and data to the datastore.
duration = time.time() - start
_system_logger.log_event(
level="info",
module="metaflow.task",
name="duration",
payload={**task_payload, "msg": str(duration)},
)
from_start("MetaflowTask: task run completed")
| MetaflowTask |
python | django__django | django/db/models/functions/text.py | {
"start": 6623,
"end": 6698
} | class ____(Transform):
function = "LTRIM"
lookup_name = "ltrim"
| LTrim |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_import_error.py | {
"start": 9223,
"end": 17003
} | class ____:
@pytest.mark.parametrize(
("query_params", "expected_status_code", "expected_total_entries", "expected_filenames"),
[
(
{},
200,
3,
[FILENAME1, FILENAME2, FILENAME3],
),
# offset, limit
(
{"limit": 1, "offset": 1},
200,
3,
[FILENAME2],
),
(
{"limit": 1, "offset": 2},
200,
3,
[FILENAME3],
),
# order_by
(
{"order_by": "-filename"},
200,
3,
[FILENAME2, FILENAME1, FILENAME3],
),
(
{"order_by": "timestamp"},
200,
3,
[FILENAME1, FILENAME3, FILENAME2],
),
(
{"order_by": "import_error_id"},
200,
3,
[FILENAME1, FILENAME2, FILENAME3],
),
(
{"order_by": "-import_error_id"},
200,
3,
[FILENAME3, FILENAME2, FILENAME1],
),
# invalid order_by
(
{"order_by": "invalid_order_by"},
400,
0,
[],
),
# combination of query parameters
(
{"limit": 2, "offset": 1, "order_by": "-filename"},
200,
3,
[FILENAME1, FILENAME3],
),
(
{"limit": 1, "offset": 2, "order_by": "-filename"},
200,
3,
[FILENAME3],
),
(
{"limit": 5, "offset": 1, "order_by": "timestamp"},
200,
3,
[FILENAME3, FILENAME2],
),
],
)
def test_get_import_errors(
self,
test_client,
query_params,
expected_status_code,
expected_total_entries,
expected_filenames,
):
with assert_queries_count(2):
response = test_client.get("/importErrors", params=query_params)
assert response.status_code == expected_status_code
if expected_status_code != 200:
return
response_json = response.json()
assert response_json["total_entries"] == expected_total_entries
assert [
import_error["filename"] for import_error in response_json["import_errors"]
] == expected_filenames
def test_should_raises_401_unauthenticated(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/importErrors")
assert response.status_code == 401
def test_should_raises_403_unauthorized(self, unauthorized_test_client):
response = unauthorized_test_client.get("/importErrors")
assert response.status_code == 403
@pytest.mark.parametrize(
("team", "batch_is_authorized_dag_return_value", "expected_stack_trace"),
[
pytest.param(
"test_team",
True,
STACKTRACE1,
id="user_has_read_access_to_all_dags_in_current_file_with_team",
),
pytest.param(
None, True, STACKTRACE1, id="user_has_read_access_to_all_dags_in_current_file_without_team"
),
pytest.param(
None,
False,
"REDACTED - you do not have read permission on all DAGs in the file",
id="user_does_not_have_read_access_to_all_dags_in_current_file",
),
],
)
@pytest.mark.usefixtures("permitted_dag_model")
@mock.patch.object(DagModel, "get_dag_id_to_team_name_mapping")
@mock.patch("airflow.api_fastapi.core_api.routes.public.import_error.get_auth_manager")
def test_user_can_not_read_all_dags_in_file(
self,
mock_get_auth_manager,
mock_get_dag_id_to_team_name_mapping,
test_client,
team,
batch_is_authorized_dag_return_value,
expected_stack_trace,
permitted_dag_model,
import_errors,
):
mock_get_dag_id_to_team_name_mapping.return_value = {permitted_dag_model.dag_id: team}
set_mock_auth_manager__is_authorized_dag(mock_get_auth_manager)
mock_get_authorized_dag_ids = set_mock_auth_manager__get_authorized_dag_ids(
mock_get_auth_manager, {permitted_dag_model.dag_id}
)
mock_batch_is_authorized_dag = set_mock_auth_manager__batch_is_authorized_dag(
mock_get_auth_manager, batch_is_authorized_dag_return_value
)
# Act
with assert_queries_count(3):
response = test_client.get("/importErrors")
# Assert
mock_get_authorized_dag_ids.assert_called_once_with(method="GET", user=mock.ANY)
assert response.status_code == 200
response_json = response.json()
assert response_json == {
"total_entries": 1,
"import_errors": [
{
"import_error_id": import_errors[0].id,
"timestamp": from_datetime_to_zulu_without_ms(TIMESTAMP1),
"filename": FILENAME1,
"stack_trace": expected_stack_trace,
"bundle_name": BUNDLE_NAME,
}
],
}
mock_batch_is_authorized_dag.assert_called_once_with(
[
{
"method": "GET",
"details": DagDetails(id=permitted_dag_model.dag_id, team_name=team),
}
],
user=mock.ANY,
)
@pytest.mark.usefixtures("permitted_dag_model")
@mock.patch("airflow.api_fastapi.core_api.routes.public.import_error.get_auth_manager")
def test_bundle_name_join_condition_for_import_errors(
self, mock_get_auth_manager, test_client, permitted_dag_model, import_errors, session
):
"""Test that the bundle_name join condition works correctly."""
set_mock_auth_manager__is_authorized_dag(mock_get_auth_manager)
mock_get_authorized_dag_ids = set_mock_auth_manager__get_authorized_dag_ids(
mock_get_auth_manager, {permitted_dag_model.dag_id}
)
set_mock_auth_manager__batch_is_authorized_dag(mock_get_auth_manager, True)
response = test_client.get("/importErrors")
# Assert
mock_get_authorized_dag_ids.assert_called_once_with(method="GET", user=mock.ANY)
assert response.status_code == 200
response_json = response.json()
# Should return the import error with matching bundle_name and filename
assert response_json["total_entries"] == 1
assert response_json["import_errors"][0]["bundle_name"] == BUNDLE_NAME
assert response_json["import_errors"][0]["filename"] == FILENAME1
# Now test that removing the bundle_name from the DagModel causes the import error to not be returned
permitted_dag_model.bundle_name = "another_bundle_name"
session.add(DagBundleModel(name="another_bundle_name"))
session.flush()
session.merge(permitted_dag_model)
session.commit()
response2 = test_client.get("/importErrors")
# Assert - should return 0 entries because bundle_name no longer matches
assert response2.status_code == 200
response_json2 = response2.json()
assert response_json2["total_entries"] == 0
assert response_json2["import_errors"] == []
| TestGetImportErrors |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_kinesis_analytics.py | {
"start": 6908,
"end": 13719
} | class ____:
APPLICATION_ARN = "arn:aws:kinesisanalytics:us-east-1:123456789012:application/demo"
ROLE_ARN = "arn:aws:iam::123456789012:role/KinesisExecutionRole"
RUN_CONFIGURATION = {"FlinkRunConfiguration": {"AllowNonRestoredState": True}}
@pytest.fixture
def mock_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(KinesisAnalyticsV2Hook, "conn") as _conn:
_conn.start_application.return_value = {}
_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN}
}
yield _conn
@pytest.fixture
def kinesis_analytics_v2_hook(self) -> Generator[KinesisAnalyticsV2Hook, None, None]:
with mock_aws():
hook = KinesisAnalyticsV2Hook(aws_conn_id="aws_default")
yield hook
def setup_method(self):
self.operator = KinesisAnalyticsV2StartApplicationOperator(
task_id="start_application_operator",
application_name="demo",
run_configuration=self.RUN_CONFIGURATION,
aws_conn_id="fake-conn-id",
region_name="eu-west-2",
verify=True,
botocore_config={"read_timeout": 42},
)
self.operator.defer = mock.MagicMock()
def test_init(self):
op = KinesisAnalyticsV2StartApplicationOperator(
task_id="start_application_operator",
application_name="demo",
run_configuration=self.RUN_CONFIGURATION,
aws_conn_id="fake-conn-id",
region_name="eu-west-2",
verify=True,
botocore_config={"read_timeout": 42},
)
assert op.application_name == "demo"
assert op.run_configuration == self.RUN_CONFIGURATION
assert op.hook.client_type == "kinesisanalyticsv2"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "eu-west-2"
assert op.hook._verify is True
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = KinesisAnalyticsV2StartApplicationOperator(
task_id="start_application_operator",
application_name="demo",
run_configuration=self.RUN_CONFIGURATION,
)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_start_application(self, kinesis_analytics_mock_conn):
kinesis_analytics_mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN}
}
kinesis_analytics_mock_conn.start_application.return_value = {}
self.op = KinesisAnalyticsV2StartApplicationOperator(
task_id="start_application_operator",
application_name="demo",
run_configuration=self.RUN_CONFIGURATION,
)
self.op.wait_for_completion = False
response = self.op.execute({})
assert response == {"ApplicationARN": self.APPLICATION_ARN}
kinesis_analytics_mock_conn.start_application.assert_called_once_with(
ApplicationName="demo", RunConfiguration=self.RUN_CONFIGURATION
)
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(KinesisAnalyticsV2Hook, "get_waiter")
def test_start_application_wait_combinations(
self, _, wait_for_completion, deferrable, mock_conn, kinesis_analytics_v2_hook
):
self.operator.wait_for_completion = wait_for_completion
self.operator.deferrable = deferrable
response = self.operator.execute({})
assert response == {"ApplicationARN": self.APPLICATION_ARN}
assert kinesis_analytics_v2_hook.get_waiter.call_count == wait_for_completion
assert self.operator.defer.call_count == deferrable
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_start_application_throw_error_when_invalid_config_provided(self, kinesis_analytics_mock_conn):
operator = KinesisAnalyticsV2StartApplicationOperator(
task_id="start_application_operator",
application_name="demo",
run_configuration={
"ApplicationRestoreConfiguration": {
"ApplicationRestoreType": "SKIP_RESTORE",
}
},
aws_conn_id="fake-conn-id",
region_name="eu-west-2",
verify=True,
botocore_config={"read_timeout": 42},
)
operator.defer = mock.MagicMock()
error_message = "Invalid config provided"
err_response = {
"Error": {"Code": "InvalidApplicationConfigurationException", "Message": error_message}
}
exception = client("kinesisanalyticsv2").exceptions.ClientError(
err_response, operation_name="StartApplication"
)
returned_exception = type(exception)
kinesis_analytics_mock_conn.exceptions.InvalidArgumentException = returned_exception
kinesis_analytics_mock_conn.start_application.side_effect = exception
with pytest.raises(AirflowException, match=error_message):
operator.execute({})
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_execute_complete(self, kinesis_analytics_mock_conn):
kinesis_analytics_mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN}
}
event = {"status": "success", "application_name": "demo"}
response = self.operator.execute_complete(context=None, event=event)
assert response == {"ApplicationARN": self.APPLICATION_ARN}
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_execute_complete_failure(self, kinesis_analytics_mock_conn):
kinesis_analytics_mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN}
}
event = {"status": "error", "application_name": "demo"}
with pytest.raises(
AirflowException,
match="Error while starting AWS Managed Service for Apache Flink application",
):
self.operator.execute_complete(context=None, event=event)
def test_template_fields(self):
validate_template_fields(self.operator)
| TestKinesisAnalyticsV2StartApplicationOperator |
python | doocs__leetcode | solution/0900-0999/0980.Unique Paths III/Solution.py | {
"start": 0,
"end": 781
} | class ____:
def uniquePathsIII(self, grid: List[List[int]]) -> int:
def dfs(i: int, j: int, k: int) -> int:
if grid[i][j] == 2:
return int(k == cnt + 1)
ans = 0
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and (x, y) not in vis and grid[x][y] != -1:
vis.add((x, y))
ans += dfs(x, y, k + 1)
vis.remove((x, y))
return ans
m, n = len(grid), len(grid[0])
start = next((i, j) for i in range(m) for j in range(n) if grid[i][j] == 1)
dirs = (-1, 0, 1, 0, -1)
cnt = sum(row.count(0) for row in grid)
vis = {start}
return dfs(*start, 0)
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 68556,
"end": 70937
} | class ____(Elemwise):
"""Column Selection"""
_parameters = ["frame", "columns"]
operation = operator.getitem
@functools.cached_property
def unique_partition_mapping_columns_from_shuffle(self):
col_op = self.operand("columns")
columns = set(col_op) if isinstance(col_op, list) else {col_op}
return {
c
for c in self.frame.unique_partition_mapping_columns_from_shuffle
if c in columns or isinstance(c, tuple) and set(c).issubset(columns)
}
@property
def columns(self):
cols = self.operand("columns")
if isinstance(cols, list):
return cols
elif isinstance(cols, pd.Index):
return list(cols)
else:
return [cols]
@functools.cached_property
def _meta(self):
if is_dataframe_like(self.frame._meta):
return super()._meta
# if we are not a DataFrame and have a scalar, we reduce to a scalar
if not isinstance(self.operand("columns"), (list, slice)) and not hasattr(
self.operand("columns"), "dtype"
):
return meta_nonempty(self.frame._meta).iloc[0]
# Avoid column selection for Series/Index
return self.frame._meta
def _node_label_args(self):
return [self.frame, self.operand("columns")]
def __str__(self):
base = str(self.frame)
if " " in base:
base = f"({base})"
return f"{base}[{self.operand('columns')!r}]"
def _divisions(self):
if self.ndim == 0:
return (None, None)
return super()._divisions()
def _simplify_down(self):
if (
str(self.frame.columns) == str(self.columns)
and self._meta.ndim == self.frame._meta.ndim
):
# TODO: we should get more precise around Expr.columns types
return self.frame
if isinstance(self.frame, Projection):
# df[a][b]
a = self.frame.operand("columns")
b = self.operand("columns")
if not isinstance(a, list):
# df[scalar][b] -> First selection coerces to Series
return
elif isinstance(b, list):
assert all(bb in a for bb in b)
else:
assert b in a
return self.frame.frame[b]
| Projection |
python | openai__openai-python | src/openai/types/embedding.py | {
"start": 209,
"end": 637
} | class ____(BaseModel):
embedding: List[float]
"""The embedding vector, which is a list of floats.
The length of vector depends on the model as listed in the
[embedding guide](https://platform.openai.com/docs/guides/embeddings).
"""
index: int
"""The index of the embedding in the list of embeddings."""
object: Literal["embedding"]
"""The object type, which is always "embedding"."""
| Embedding |
python | xlwings__xlwings | tests/test_shape.py | {
"start": 3372,
"end": 8240
} | class ____(TestBase):
def test_two_books(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic1 = self.wb1.sheets[0].pictures.add(filename, name="pic1")
pic2 = self.wb2.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(pic1.name, "pic1")
self.assertEqual(pic2.name, "pic1")
def test_name(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(pic.name, "pic1")
pic.name = "pic_new"
self.assertEqual(pic.name, "pic_new")
def test_left(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(pic.left, 0)
pic.left = 20
self.assertEqual(pic.left, 20)
def test_top(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(pic.left, 0)
pic.top = 20
self.assertEqual(pic.top, 20)
def test_width(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(int(pic.width), 30)
pic.width = 50
self.assertEqual(pic.width, 50)
def test_picture_object(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(pic.name, self.wb1.sheets[0].pictures["pic1"].name)
def test_height(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(int(pic.height), 30)
pic.height = 50
self.assertEqual(int(pic.height), 50)
def test_delete(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic = self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertTrue("pic1" in self.wb1.sheets[0].pictures)
pic.delete()
self.assertFalse("pic1" in self.wb1.sheets[0].pictures)
def test_duplicate(self):
with self.assertRaises(xw.ShapeAlreadyExists):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
def test_picture_update(self):
filename = os.path.join(this_dir, "sample_picture.png")
pic1 = self.wb1.sheets[0].pictures.add(filename, name="pic1")
pic1.update(filename)
@unittest.skipIf(pathlib is None, "pathlib unavailable")
def test_picture_update_pathlib(self):
filename = pathlib.Path(this_dir) / "sample_picture.png"
pic1 = self.wb1.sheets[0].pictures.add(filename, name="pic1")
pic1.update(filename)
def test_picture_auto_update(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1", update=True)
self.wb1.sheets[0].pictures.add(filename, name="pic1", update=True)
self.assertEqual(len(self.wb1.sheets[0].pictures), 1)
def test_picture_auto_update_without_name(self):
with self.assertRaises(ValueError):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, update=True)
def test_picture_index(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.assertEqual(
self.wb1.sheets[0].pictures[0], self.wb1.sheets[0].pictures["pic1"]
)
self.assertEqual(self.wb1.sheets[0].pictures(1), self.wb1.sheets[0].pictures[0])
def test_len(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic1")
self.wb1.sheets[0].pictures.add(filename, name="pic2")
self.assertEqual(len(self.wb1.sheets[0].pictures), 2)
def test_iter(self):
filename = os.path.join(this_dir, "sample_picture.png")
names = ["pic1", "pic2"]
self.wb1.sheets[0].pictures.add(filename, name=names[0])
self.wb1.sheets[0].pictures.add(filename, name=names[1])
for ix, pic in enumerate(self.wb1.sheets[0].pictures):
self.assertEqual(self.wb1.sheets[0].pictures[ix].name, names[ix])
def test_contains(self):
filename = os.path.join(this_dir, "sample_picture.png")
self.wb1.sheets[0].pictures.add(filename, name="pic 1")
self.assertTrue("pic 1" in self.wb1.sheets[0].pictures)
@unittest.skipIf(mpl is None, "matplotlib missing")
| TestPicture |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 334,
"end": 379
} | class ____(Generic[*Ts]):
var: tuple[*Ts]
| B |
python | ray-project__ray | python/ray/train/v2/tests/util.py | {
"start": 7193,
"end": 8853
} | class ____(TrainContext):
"""A dummy TrainContext subclass for testing."""
def __init__(self):
self.train_run_context = create_dummy_run_context()
self.distributed_context = DistributedContext(
world_rank=0,
world_size=1,
local_rank=0,
local_world_size=1,
node_rank=0,
)
# Mock everything else since we don't need the actual functionality
self.execution_context = MagicMock()
self.storage_context = MagicMock()
self.dataset_shards = {}
def get_run_config(self):
return self.train_run_context.run_config
def create_dummy_train_context() -> TrainContext:
"""Create a standardized TrainContext for testing.
Returns:
TrainContext: A standardized TrainContext instance for testing.
"""
return DummyTrainContext()
def create_dummy_training_results(
num_results: int,
storage_context: StorageContext,
include_metrics: bool = True,
) -> List[_TrainingResult]:
training_results = []
for i in range(num_results):
metrics = {"score": i} if include_metrics else {}
checkpoint_path = os.path.join(
storage_context.experiment_fs_path, f"checkpoint_{i}"
)
os.makedirs(checkpoint_path, exist_ok=True)
training_results.append(
_TrainingResult(
checkpoint=Checkpoint(
path=Path(checkpoint_path).as_posix(),
filesystem=storage_context.storage_filesystem,
),
metrics=metrics,
)
)
return training_results
| DummyTrainContext |
python | apache__airflow | devel-common/src/sphinx_exts/exampleinclude.py | {
"start": 1562,
"end": 10132
} | class ____(SphinxDirective):
"""
Like ``.. literalinclude:: ``, but it does not support caption option.
Adds a header with a reference to the full source code
Based on:
https://raw.githubusercontent.com/sphinx-doc/sphinx/v1.8.3/sphinx/directives/code.py
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
"dedent": int,
"linenos": directives.flag,
"lineno-start": int,
"lineno-match": directives.flag,
"tab-width": int,
"language": directives.unchanged_required,
"encoding": directives.encoding,
"pyobject": directives.unchanged_required,
"lines": directives.unchanged_required,
"start-after": directives.unchanged_required,
"end-before": directives.unchanged_required,
"start-at": directives.unchanged_required,
"end-at": directives.unchanged_required,
"prepend": directives.unchanged_required,
"append": directives.unchanged_required,
"emphasize-lines": directives.unchanged_required,
"class": directives.class_option,
"name": directives.unchanged,
"caption": directives.unchanged_required,
"diff": directives.unchanged_required,
}
def run(self):
document = self.state.document
if not document.settings.file_insertion_enabled:
return [document.reporter.warning("File insertion disabled", line=self.lineno)]
# convert options['diff'] to absolute a_path
if "diff" in self.options:
_, a_path = self.env.relfn2path(self.options["diff"])
self.options["diff"] = a_path
try:
location = self.state_machine.get_source_and_line(self.lineno)
rel_filename, filename = self.env.relfn2path(self.arguments[0])
self.env.note_dependency(rel_filename)
reader = LiteralIncludeReader(filename, self.options, self.config)
text, lines = reader.read(location=location)
retnode = nodes.literal_block(text, text, source=filename)
set_source_info(self, retnode)
if self.options.get("diff"): # if diff is set, set udiff
retnode["language"] = "udiff"
elif "language" in self.options:
retnode["language"] = self.options["language"]
retnode["linenos"] = (
"linenos" in self.options or "lineno-start" in self.options or "lineno-match" in self.options
)
retnode["classes"] += self.options.get("class", [])
extra_args = retnode["highlight_args"] = {}
if "emphasize-lines" in self.options:
hl_lines = parselinenos(self.options["emphasize-lines"], lines)
if any(i >= lines for i in hl_lines):
logger.warning(
"line number spec is out of range(1-%d): %r", lines, self.options["emphasize-lines"]
)
extra_args["hl_lines"] = [x + 1 for x in hl_lines if x < lines]
extra_args["linenostart"] = reader.lineno_start
container_node = nodes.compound(classes=["example-block-wrapper"])
container_node += ExampleHeader(filename=filename)
container_node += retnode
retnode = container_node
return [retnode]
except Exception as exc:
return [document.reporter.warning(str(exc), line=self.lineno)]
def register_source(app, env, modname):
"""
Registers source code.
:param app: application
:param env: environment of the plugin
:param modname: name of the module to load
:return: True if the code is registered successfully, False otherwise
"""
if modname is None:
return False
entry = env._viewcode_modules.get(modname, None)
if entry is False:
print(f"[{modname}] Entry is false for ")
return False
code_tags = app.emit_firstresult("viewcode-find-source", modname)
if code_tags is None:
try:
analyzer = ModuleAnalyzer.for_module(modname)
except Exception as ex:
logger.info(
'Module "%s" could not be loaded. Full source will not be available. "%s"', modname, ex
)
# We cannot use regular warnings or exception methods because those warnings are interpreted
# by running python process and converted into "real" warnings, so we need to print the
# traceback here at info level
tb = traceback.format_exc()
logger.info("%s", tb)
env._viewcode_modules[modname] = False
return False
if not isinstance(analyzer.code, str):
code = analyzer.code.decode(analyzer.encoding)
else:
code = analyzer.code
analyzer.find_tags()
tags = analyzer.tags
else:
code, tags = code_tags
if entry is None or entry[0] != code:
entry = code, tags, {}, ""
env._viewcode_modules[modname] = entry
return True
def create_node(env, relative_path, show_button):
"""
Creates documentation node for example include.
:param env: environment of the documentation
:param relative_path: path of the code
:param show_button: whether to show "view code" button
:return paragraph with the node
"""
if relative_path.startswith("providers/"):
relative_path = relative_path.replace("providers/", "", 1)
if relative_path.startswith("airflow-core/src/"):
relative_path = relative_path.replace("airflow-core/src/", "", 1)
if relative_path.startswith("src/"):
relative_path = relative_path.replace("src/", "", 1)
if relative_path.endswith(".py"):
pagename = "_modules/" + relative_path[:-3]
else:
pagename = "_modules/" + relative_path
header_classes = ["example-header"]
if show_button:
header_classes += ["example-header--with-button"]
paragraph = nodes.paragraph(relative_path, classes=header_classes)
paragraph += nodes.inline("", relative_path, classes=["example-title"])
if show_button:
pending_ref = viewcode_anchor(
reftarget=pagename,
refid="",
refdoc=env.docname,
classes=["example-header-button viewcode-button"],
)
pending_ref += nodes.inline("", _("View Source"))
paragraph += pending_ref
return paragraph
def doctree_read(app, doctree):
"""
Reads documentation tree for the application and register sources in the generated documentation.
:param app: application
:param doctree: documentation tree
:return None
"""
env = app.builder.env
if not hasattr(env, "_viewcode_modules"):
env._viewcode_modules = {}
if app.builder.name == "singlehtml":
return
for objnode in doctree.traverse(ExampleHeader):
source_root_path = Path(app.config.exampleinclude_sourceroot)
filepath = Path(objnode.get("filename"))
if filepath.is_relative_to(source_root_path) and filepath.name.endswith(".py"):
module_path = filepath.relative_to(source_root_path)
split_modname = module_path.parts
if "src" in split_modname:
modname = ".".join(split_modname[split_modname.index("src") + 1 :])
elif "tests" in split_modname:
modname = ".".join(split_modname[split_modname.index("tests") + 1 :])
else:
modname = ".".join(split_modname)
modname = modname.replace(".py", "")
else:
modname = None
module_path = filepath.resolve()
show_button = register_source(app, env, modname)
onlynode = create_node(env, module_path.as_posix(), show_button)
objnode.replace_self(onlynode)
def setup(app):
"""
Sets the plugin up and returns configuration of the plugin.
:param app: application.
:return json description of the configuration that is needed by the plugin.
"""
directives.register_directive("exampleinclude", ExampleInclude)
app.connect("doctree-read", doctree_read)
app.add_config_value("exampleinclude_sourceroot", None, "env")
if not airflow_theme_is_available:
# Sphinx airflow theme has its own styles.
app.add_css_file("exampleinclude.css")
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
| ExampleInclude |
python | facebook__pyre-check | pyre_extensions/__init__.py | {
"start": 989,
"end": 3893
} | class ____(list):
"""This kind of type variable captures callable parameter specifications
(known as argspecs in the runtime and inspect library) instead of types,
allowing the typing of decorators which transform the return type of the
given callable.
For example:
from typing import TypeVar, Callable, List
from pyre_extensions import ParameterSpecification
Tparams = ParameterSpecification("Tparams")
Treturn = TypeVar("Treturn")
def unwrap(
f: Callable[Tparams, List[Treturn],
) -> Callable[Tparams, Treturn]: ...
@unwrap
def foo(x: int, y: str, z: bool = False) -> List[int]:
return [1, 2, 3]
decorates foo into a callable that returns int, but still has the same
parameters, including their names and whether they are required.
The list inheritance is required for backwards compatibility with the runtime
implementation for callables, which requires the first argument to be
a list.
The args and kwargs properties are used for specifying that a literal definition
has the same signature as a ParameterSpecification, like:
def listify(
f: Callable[TParams, TReturn]
) -> Callable[TParams, List[TReturn]]:
def wrapped( *args: TParams.args, **kwargs: TParams.kwargs):
return [f(*args, **kwargs)]
"""
args = object()
kwargs = object()
def __init__(self, *args: object, **kwargs: object) -> None:
pass
def TypeVarTuple(name: str) -> object:
return Any
_A = TypeVar("_A", bound=int)
_B = TypeVar("_B", bound=int)
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_Ts = TypeVarTuple("_Ts")
_P = ParameterSpecification("_P")
T = TypeVar("T", bound=Callable[..., object])
def override(__f: T) -> T:
"""Indicate that a method is intended to override a method in a base class.
Usage:
class Base:
def method(self) -> None: ...
pass
class Child(Base):
@override
def method(self) -> None:
super().method()
When this decorator is applied to a method, the type checker will
validate that it overrides a method with the same name on a base class.
This helps prevent bugs that may occur when a base class is changed
without an equivalent change to a child class.
There is no runtime checking of these properties. The decorator
sets the ``__override__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
See PEP 698 for details.
"""
try:
__f.__override__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return __f
| ParameterSpecification |
python | apache__airflow | providers/standard/tests/unit/standard/utils/test_weekday.py | {
"start": 924,
"end": 3233
} | class ____:
def test_weekday_enum_length(self):
assert len(WeekDay) == 7
def test_weekday_name_value(self):
weekdays = "MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY"
weekdays = weekdays.split()
for i, weekday in enumerate(weekdays, start=1):
weekday_enum = WeekDay(i)
assert weekday_enum == i
assert int(weekday_enum) == i
assert weekday_enum.name == weekday
assert weekday_enum in WeekDay
assert 0 < weekday_enum < 8
assert isinstance(weekday_enum, WeekDay)
assert isinstance(weekday_enum, int)
assert isinstance(weekday_enum, Enum)
@pytest.mark.parametrize(
("weekday", "expected"),
[
("Monday", 1),
(WeekDay.MONDAY, 1),
],
ids=["with-string", "with-enum"],
)
def test_convert(self, weekday, expected):
result = WeekDay.convert(weekday)
assert result == expected
def test_convert_with_incorrect_input(self):
invalid = "Sun"
error_message = rf'Invalid Week Day passed: "{invalid}"'
with pytest.raises(AttributeError, match=error_message):
WeekDay.convert(invalid)
@pytest.mark.parametrize(
("weekday", "expected"),
[
("Monday", {WeekDay.MONDAY}),
(WeekDay.MONDAY, {WeekDay.MONDAY}),
({"Thursday": "1"}, {WeekDay.THURSDAY}),
(["Thursday"], {WeekDay.THURSDAY}),
(["Thursday", WeekDay.MONDAY], {WeekDay.MONDAY, WeekDay.THURSDAY}),
],
ids=[
"with-string",
"with-enum",
"with-dict",
"with-list",
"with-mix",
],
)
def test_validate_week_day(self, weekday, expected):
result = WeekDay.validate_week_day(weekday)
assert expected == result
def test_validate_week_day_with_invalid_type(self):
invalid_week_day = 5
with pytest.raises(
TypeError,
match=f"Unsupported Type for week_day parameter: {type(invalid_week_day)}."
"Input should be iterable type:"
"str, set, list, dict or Weekday enum type",
):
WeekDay.validate_week_day(invalid_week_day)
| TestWeekDay |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 15526,
"end": 15790
} | class ____(AbstractTestWeakRefMixin, AbstractGenericGetTestCase):
Timeout = Empty
kind = queue.SimpleQueue
def wait(self, timeout):
return self._makeOne().get(timeout=timeout)
def _makeOne(self):
return self.kind()
| TestGetInterrupt |
python | wandb__wandb | wandb/vendor/pygments/lexers/dsls.py | {
"start": 17922,
"end": 19078
} | class ____(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
| VGLLexer |
python | paramiko__paramiko | paramiko/auth_strategy.py | {
"start": 5753,
"end": 7574
} | class ____(list):
"""
Represents a partial or complete SSH authentication attempt.
This class conceptually extends `AuthStrategy` by pairing the former's
authentication **sources** with the **results** of trying to authenticate
with them.
`AuthResult` is a (subclass of) `list` of `namedtuple`, which are of the
form ``namedtuple('SourceResult', 'source', 'result')`` (where the
``source`` member is an `AuthSource` and the ``result`` member is either a
return value from the relevant `.Transport` method, or an exception
object).
.. note::
Transport auth method results are always themselves a ``list`` of "next
allowable authentication methods".
In the simple case of "you just authenticated successfully", it's an
empty list; if your auth was rejected but you're allowed to try again,
it will be a list of string method names like ``pubkey`` or
``password``.
The ``__str__`` of this class represents the empty-list scenario as the
word ``success``, which should make reading the result of an
authentication session more obvious to humans.
Instances also have a `strategy` attribute referencing the `AuthStrategy`
which was attempted.
"""
def __init__(self, strategy, *args, **kwargs):
self.strategy = strategy
super().__init__(*args, **kwargs)
def __str__(self):
# NOTE: meaningfully distinct from __repr__, which still wants to use
# superclass' implementation.
# TODO: go hog wild, use rich.Table? how is that on degraded term's?
# TODO: test this lol
return "\n".join(
f"{x.source} -> {x.result or 'success'}" for x in self
)
# TODO 4.0: descend from SSHException or even just Exception
| AuthResult |
python | huggingface__transformers | tests/models/granite/test_modeling_granite.py | {
"start": 5614,
"end": 6705
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
GraniteModel,
GraniteForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GraniteModel,
"text-generation": GraniteForCausalLM,
}
if is_torch_available()
else {}
)
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
def setUp(self):
self.model_tester = GraniteModelTester(self)
self.config_tester = ConfigTester(self, config_class=GraniteConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@require_torch_accelerator
| GraniteModelTest |
python | fastapi__sqlmodel | docs_src/tutorial/one/tutorial001.py | {
"start": 100,
"end": 1638
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age <= 35)
results = session.exec(statement)
hero = results.first()
print("Hero:", hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks.py | {
"start": 37868,
"end": 38394
} | class ____(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
@tf_export(v1=["train.ProfilerHook"])
| FeedFnHook |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 45193,
"end": 51208
} | class ____(Speech2TextPreTrainedModel, GenerationMixin):
input_modalities = ("audio", "text")
base_model_prefix = "model"
_tied_weights_keys = {"lm_head.weight": "model.decoder.embed_tokens.weight"}
def __init__(self, config: Speech2TextConfig):
super().__init__(config)
self.model = Speech2TextModel(config)
self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_features: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read
[`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the
paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import torch
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
>>> from datasets import load_dataset
>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(
... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
... )
>>> input_features = inputs.input_features
>>> generated_ids = model.generate(inputs=input_features)
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_features,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
__all__ = ["Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel"]
| Speech2TextForConditionalGeneration |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 5522,
"end": 5727
} | class ____(MultiTableInheritanceB1):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
# 3. abstract models
| MultiTableInheritanceB2 |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 4102,
"end": 7512
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
# Ignore copy
def __init__(self, config: EfficientLoFTRConfig, device=None):
super().__init__()
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
# Ignore copy
def compute_default_rope_parameters(
config: Optional[EfficientLoFTRConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
# Ignore copy
@torch.no_grad()
def forward(
self, x: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_type=None
) -> tuple[torch.Tensor, torch.Tensor]:
feats_height, feats_width = x.shape[-2:]
embed_height = (feats_height - self.config.q_aggregation_kernel_size) // self.config.q_aggregation_stride + 1
embed_width = (feats_width - self.config.q_aggregation_kernel_size) // self.config.q_aggregation_stride + 1
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
emb = compute_embeddings(self.inv_freq, embed_height, embed_width, self.config.hidden_size)
sin = emb.sin()
cos = emb.cos()
sin = sin.repeat_interleave(2, dim=-1)
cos = cos.repeat_interleave(2, dim=-1)
sin = sin.to(device=x.device, dtype=x.dtype)
cos = cos.to(device=x.device, dtype=x.dtype)
return cos, sin
# Copied from transformers.models.rt_detr_v2.modeling_rt_detr_v2.RTDetrV2ConvNormLayer with RTDetrV2->EfficientLoFTR
| EfficientLoFTRRotaryEmbedding |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/utils/type_info.py | {
"start": 378,
"end": 4886
} | class ____(metaclass=visitor_meta.VisitorMeta):
__slots__ = '_schema', '_type_stack', '_parent_type_stack', '_input_type_stack', '_field_def_stack', '_directive', \
'_argument', '_get_field_def_fn'
def __init__(self, schema, get_field_def_fn=get_field_def):
self._schema = schema
self._type_stack = []
self._parent_type_stack = []
self._input_type_stack = []
self._field_def_stack = []
self._directive = None
self._argument = None
self._get_field_def_fn = get_field_def_fn
def get_type(self):
if self._type_stack:
return self._type_stack[-1]
def get_parent_type(self):
if self._parent_type_stack:
return self._parent_type_stack[-1]
def get_input_type(self):
if self._input_type_stack:
return self._input_type_stack[-1]
def get_field_def(self):
if self._field_def_stack:
return self._field_def_stack[-1]
def get_directive(self):
return self._directive
def get_argument(self):
return self._argument
def leave(self, node):
method = self._get_leave_handler(type(node))
if method:
return method(self)
def enter(self, node):
method = self._get_enter_handler(type(node))
if method:
return method(self, node)
def enter_SelectionSet(self, node):
named_type = get_named_type(self.get_type())
composite_type = None
if is_composite_type(named_type):
composite_type = named_type
self._parent_type_stack.append(composite_type)
def enter_Field(self, node):
parent_type = self.get_parent_type()
field_def = None
if parent_type:
field_def = self._get_field_def_fn(self._schema, parent_type, node)
self._field_def_stack.append(field_def)
self._type_stack.append(field_def and field_def.type)
def enter_Directive(self, node):
self._directive = self._schema.get_directive(node.name.value)
def enter_OperationDefinition(self, node):
definition_type = None
if node.operation == 'query':
definition_type = self._schema.get_query_type()
elif node.operation == 'mutation':
definition_type = self._schema.get_mutation_type()
self._type_stack.append(definition_type)
def enter_InlineFragment(self, node):
type_condition_ast = node.type_condition
type = type_from_ast(self._schema, type_condition_ast) if type_condition_ast else self.get_type()
self._type_stack.append(type)
enter_FragmentDefinition = enter_InlineFragment
def enter_VariableDefinition(self, node):
self._input_type_stack.append(type_from_ast(self._schema, node.type))
def enter_Argument(self, node):
arg_def = None
arg_type = None
field_or_directive = self.get_directive() or self.get_field_def()
if field_or_directive:
arg_def = field_or_directive.args.get(node.name.value)
if arg_def:
arg_type = arg_def.type
self._argument = arg_def
self._input_type_stack.append(arg_type)
def enter_ListValue(self, node):
list_type = get_nullable_type(self.get_input_type())
self._input_type_stack.append(
list_type.of_type if isinstance(list_type, GraphQLList) else None
)
def enter_ObjectField(self, node):
object_type = get_named_type(self.get_input_type())
field_type = None
if isinstance(object_type, GraphQLInputObjectType):
input_field = object_type.fields.get(node.name.value)
field_type = input_field.type if input_field else None
self._input_type_stack.append(field_type)
def leave_SelectionSet(self):
pop(self._parent_type_stack)
def leave_Field(self):
pop(self._field_def_stack)
pop(self._type_stack)
def leave_Directive(self):
self._directive = None
def leave_OperationDefinition(self):
pop(self._type_stack)
leave_InlineFragment = leave_OperationDefinition
leave_FragmentDefinition = leave_OperationDefinition
def leave_VariableDefinition(self):
pop(self._input_type_stack)
def leave_Argument(self):
self._argument = None
pop(self._input_type_stack)
def leave_ListType(self):
pop(self._input_type_stack)
leave_ObjectField = leave_ListType
| TypeInfo |
python | ray-project__ray | python/ray/dag/tests/experimental/test_collective_dag.py | {
"start": 2255,
"end": 16878
} | class ____:
def __init__(self):
return
def backward(self, _):
return 0
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_duplicate_actors(ray_start_regular):
"""
Test an error is thrown when two input nodes from the same actor bind to
an all-reduce.
"""
actor_cls = CPUTorchTensorWorker.options()
worker = actor_cls.remote()
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for _ in range(2)]
with pytest.raises(
ValueError,
match="Expected unique actor handles, but found duplicate actor handles from input nodes",
):
collective.allreduce.bind(computes)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_custom_comm_wrong_actors(ray_start_regular):
"""
Test an error is thrown when an all-reduce binds to a custom NCCL group and
a wrong set of actors.
"""
actor_cls = CPUTorchTensorWorker.options()
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
nccl_group = AbstractNcclGroup([workers[0]])
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
with pytest.raises(
ValueError,
match="Expected actor handles to match the custom communicator group",
):
collective.allreduce.bind(computes, transport=nccl_group)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_bind_list_of_nodes_duplicate_nodes(ray_start_regular):
"""
Test an error is thrown when an all-reduce binds to lists of nodes
that are duplicated.
"""
actor_cls = CPUTorchTensorWorker.options()
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
nccl_group = AbstractNcclGroup([workers[0]])
with InputNode() as inp:
computes_0 = [worker.return_tensor.bind(inp) for worker in workers]
computes_1 = [workers[0].return_tensor.bind(inp) for _ in range(2)]
with pytest.raises(
ValueError,
match="Expected unique actor handles at list at index",
):
collective.allreduce.bind([computes_0, computes_1], transport=nccl_group)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_bind_list_of_nodes_unequal_number_of_nodes(ray_start_regular):
"""
Test an error is thrown when an all-reduce binds to lists of nodes
of different number of nodes across actors.
"""
actor_cls = CPUTorchTensorWorker.options()
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
nccl_group = AbstractNcclGroup([workers[0]])
with InputNode() as inp:
computes_0 = [worker.return_tensor.bind(inp) for worker in workers]
computes_1 = [worker.return_tensor.bind(inp) for worker in workers[1:]]
with pytest.raises(
ValueError,
match="Expected all input lists to have the same number of nodes",
):
collective.allreduce.bind([computes_0, computes_1], transport=nccl_group)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_bind_list_of_nodes_different_actors(ray_start_regular):
"""
Test an error is thrown when an all-reduce binds to a list of nodes
from different set of actors.
"""
actor_cls = CPUTorchTensorWorker.options()
num_workers = 3
workers = [actor_cls.remote() for _ in range(num_workers)]
nccl_group = AbstractNcclGroup([workers[0]])
with InputNode() as inp:
computes_0 = [worker.return_tensor.bind(inp) for worker in workers[:2]]
computes_1 = [worker.return_tensor.bind(inp) for worker in workers[1:]]
with pytest.raises(
ValueError,
match="Expected all input lists to have the same set of actor handles",
):
collective.allreduce.bind([computes_0, computes_1], transport=nccl_group)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
def test_all_reduce_bind_list_of_nodes_different_dtypes(ray_start_regular):
"""
Test an error is thrown when an all-reduce binds to a list of nodes
that execute with tensors of different dtypes.
"""
actor_cls = CPUTorchTensorWorker.options()
num_workers = 3
workers = [actor_cls.remote() for _ in range(num_workers)]
comm = MockCommunicator(num_workers, workers)
with InputNode() as inp:
computes_0 = [worker.return_tensor.bind(inp[0], inp[1]) for worker in workers]
computes_1 = [worker.return_tensor.bind(inp[0], inp[2]) for worker in workers]
collectives = collective.allreduce.bind(
[computes_0, computes_1], transport=comm
)
recvs = [
worker.recv_tensors.bind(*collective)
for worker, collective in zip(workers, collectives)
]
dag = MultiOutputNode(recvs)
compiled_dag = dag.experimental_compile()
with pytest.raises(
ValueError,
match="Expected all input tensors to have the same dtype",
):
import torch
ray.get(compiled_dag.execute(1, torch.float16, torch.float32))
@pytest.mark.parametrize(
"ray_start_regular", [{"num_cpus": 4, "num_gpus": 4}], indirect=True
)
def test_comm_all_reduces(ray_start_regular, monkeypatch):
"""
Test different communicators are used for different all-reduce calls of
different sets of actors.
"""
actor_cls = CPUTorchTensorWorker.options(num_cpus=0, num_gpus=1)
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
# There are two all-reduces, each on one actor.
collectives = [collective.allreduce.bind([compute]) for compute in computes]
# collective[0] is the only CollectiveOutputNode for each all-reduce.
dag = MultiOutputNode([collective[0] for collective in collectives])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{
(frozenset([workers[0]]), None),
(frozenset([workers[1]]), None),
},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
@pytest.mark.parametrize(
"ray_start_regular", [{"num_cpus": 4, "num_gpus": 4}], indirect=True
)
def test_comm_deduplicate_all_reduces(ray_start_regular, monkeypatch):
"""
Test communicators are deduplicated when all-reduces are called on the same
group of actors more than once.
"""
actor_cls = CPUTorchTensorWorker.options(num_cpus=0, num_gpus=1)
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
with InputNode() as inp:
tensors = [worker.return_tensor.bind(inp) for worker in workers]
collectives = collective.allreduce.bind(tensors)
collectives = collective.allreduce.bind(collectives)
dag = MultiOutputNode(collectives)
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{(frozenset(workers), None)},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
@pytest.mark.parametrize(
"ray_start_regular", [{"num_cpus": 4, "num_gpus": 4}], indirect=True
)
def test_comm_deduplicate_p2p_and_collective(ray_start_regular, monkeypatch):
"""
Test communicators are deduplicated when the collective and the P2P are on
the same set of actors.
"""
actor_cls = CPUTorchTensorWorker.options(num_cpus=0, num_gpus=1)
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
collectives = collective.allreduce.bind(computes)
recvs = [
# Each of the 2 workers receives from the other.
workers[0].recv.bind(
collectives[1].with_tensor_transport(transport="nccl")
),
workers[1].recv.bind(
collectives[0].with_tensor_transport(transport="nccl")
),
]
dag = MultiOutputNode(recvs)
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{(frozenset(workers), None)},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
collectives = collective.allreduce.bind(computes)
# Sender is workers[0] and receiver is workers[1].
dag = workers[1].recv.bind(
collectives[0].with_tensor_transport(transport="nccl")
)
dag = MultiOutputNode([dag, collectives[1]])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{(frozenset(workers), None)},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
@pytest.mark.parametrize(
"ray_start_regular", [{"num_cpus": 4, "num_gpus": 4}], indirect=True
)
def test_custom_comm(ray_start_regular, monkeypatch):
"""
Test a custom GPU communicator is used when specified and a default
communicator is used otherwise.
"""
actor_cls = CPUTorchTensorWorker.options(num_cpus=0, num_gpus=1)
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
comm = AbstractNcclGroup(workers)
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
collectives = collective.allreduce.bind(computes, transport=comm)
collectives = collective.allreduce.bind(collectives)
dag = workers[0].recv.bind(
collectives[1].with_tensor_transport(transport="nccl")
)
dag = MultiOutputNode([dag, collectives[0]])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{
(frozenset(workers), comm),
(frozenset(workers), None),
},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
comm = AbstractNcclGroup(workers)
with InputNode() as inp:
computes = [worker.return_tensor.bind(inp) for worker in workers]
collectives = collective.allreduce.bind(computes)
collectives = collective.allreduce.bind(collectives)
dag = workers[0].recv.bind(collectives[1].with_tensor_transport(transport=comm))
dag = MultiOutputNode([dag, collectives[0]])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{
(frozenset(workers), comm),
(frozenset(workers), None),
},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
@pytest.mark.parametrize(
"ray_start_regular", [{"num_cpus": 4, "num_gpus": 4}], indirect=True
)
def test_custom_comm_init_teardown(ray_start_regular, monkeypatch):
"""
Test custom NCCL groups are properly initialized and destroyed.
1. Test when multiple type hints have the same `transport=custom_nccl_group`,
the `custom_nccl_group` is initialized only once.
2. Test all initialized NCCL groups are destroyed during teardown.
"""
actor_cls = CPUTorchTensorWorker.options(num_cpus=0, num_gpus=1)
num_workers = 2
workers = [actor_cls.remote() for _ in range(num_workers)]
comm = AbstractNcclGroup(workers)
with InputNode() as inp:
tensors = [worker.return_tensor.bind(inp) for worker in workers]
allreduce = collective.allreduce.bind(tensors, transport=comm)
dag = workers[0].recv.bind(allreduce[1].with_tensor_transport(transport=comm))
dag = MultiOutputNode([dag, allreduce[0]])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{(frozenset(workers), comm)},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
comm_1 = AbstractNcclGroup(workers)
comm_2 = AbstractNcclGroup(workers)
comm_3 = AbstractNcclGroup(workers)
with InputNode() as inp:
tensors = [worker.return_tensor.bind(inp) for worker in workers]
allreduce1 = collective.allreduce.bind(tensors, transport=comm_1)
allreduce2 = collective.allreduce.bind(allreduce1, transport=comm_2)
dag = workers[0].recv.bind(
allreduce2[1].with_tensor_transport(transport=comm_3)
)
dag = MultiOutputNode([dag, allreduce2[0]])
compiled_dag, mock_nccl_group_set = check_nccl_group_init(
monkeypatch,
dag,
{
(frozenset(workers), comm_1),
(frozenset(workers), comm_2),
(frozenset(workers), comm_3),
},
)
check_nccl_group_teardown(monkeypatch, compiled_dag, mock_nccl_group_set)
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 4}], indirect=True)
@pytest.mark.parametrize("num_workers", [2, 4])
def test_exec_schedules_ddp(ray_start_regular, num_workers):
"""
Test the execution schedules for the DDP strategy. Each worker should have
identical schedules.
"""
actor_cls = DDPWorker.options(num_cpus=1)
workers = [actor_cls.remote() for _ in range(num_workers)]
comm = MockCommunicator(num_workers, workers)
outputs = []
with InputNode() as inp:
grads = [worker.backward.bind(inp) for worker in workers]
grads_reduced = collective.allreduce.bind(grads, transport=comm)
outputs.extend(grads_reduced)
grads = [worker.backward.bind(grad) for worker, grad in zip(workers, grads)]
grads_reduced = collective.allreduce.bind(grads, transport=comm)
outputs.extend(grads_reduced)
dag = MultiOutputNode(outputs)
compiled_dag = dag.experimental_compile(_default_communicator=comm)
actor_to_execution_schedule = list(
compiled_dag.actor_to_execution_schedule.values()
)
expected_schedule = actor_to_execution_schedule[0]
for schedule in actor_to_execution_schedule[1:]:
assert schedule == expected_schedule
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| DDPWorker |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 185202,
"end": 185671
} | class ____:
_col_type = DATERANGE
_col_str = "DATERANGE"
def _data_str(self):
return "[2013-03-23,2013-03-30)"
def _data_obj(self):
return Range(datetime.date(2013, 3, 23), datetime.date(2013, 3, 30))
_epsilon = datetime.timedelta(days=1)
def _step_value_up(self, value):
return value + datetime.timedelta(days=1)
def _step_value_down(self, value):
return value - datetime.timedelta(days=1)
| _DateRangeTests |
python | doocs__leetcode | solution/1700-1799/1785.Minimum Elements to Add to Form a Given Sum/Solution.py | {
"start": 0,
"end": 164
} | class ____:
def minElements(self, nums: List[int], limit: int, goal: int) -> int:
d = abs(sum(nums) - goal)
return (d + limit - 1) // limit
| Solution |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 55432,
"end": 64529
} | class ____(MetaSchemaTestsMixin):
def test_it_implements_the_validator_protocol(self):
self.assertIsInstance(self.Validator({}), protocols.Validator)
def test_valid_instances_are_valid(self):
schema, instance = self.valid
self.assertTrue(self.Validator(schema).is_valid(instance))
def test_invalid_instances_are_not_valid(self):
schema, instance = self.invalid
self.assertFalse(self.Validator(schema).is_valid(instance))
def test_non_existent_properties_are_ignored(self):
self.Validator({object(): object()}).validate(instance=object())
def test_evolve(self):
schema, format_checker = {"type": "integer"}, FormatChecker()
original = self.Validator(
schema,
format_checker=format_checker,
)
new = original.evolve(
schema={"type": "string"},
format_checker=self.Validator.FORMAT_CHECKER,
)
expected = self.Validator(
{"type": "string"},
format_checker=self.Validator.FORMAT_CHECKER,
_resolver=new._resolver,
)
self.assertEqual(new, expected)
self.assertNotEqual(new, original)
def test_evolve_with_subclass(self):
"""
Subclassing validators isn't supported public API, but some users have
done it, because we don't actually error entirely when it's done :/
We need to deprecate doing so first to help as many of these users
ensure they can move to supported APIs, but this test ensures that in
the interim, we haven't broken those users.
"""
with self.assertWarns(DeprecationWarning):
@define
class OhNo(self.Validator):
foo = field(factory=lambda: [1, 2, 3])
_bar = field(default=37)
validator = OhNo({}, bar=12)
self.assertEqual(validator.foo, [1, 2, 3])
new = validator.evolve(schema={"type": "integer"})
self.assertEqual(new.foo, [1, 2, 3])
self.assertEqual(new._bar, 12)
def test_is_type_is_true_for_valid_type(self):
self.assertTrue(self.Validator({}).is_type("foo", "string"))
def test_is_type_is_false_for_invalid_type(self):
self.assertFalse(self.Validator({}).is_type("foo", "array"))
def test_is_type_evades_bool_inheriting_from_int(self):
self.assertFalse(self.Validator({}).is_type(True, "integer"))
self.assertFalse(self.Validator({}).is_type(True, "number"))
def test_it_can_validate_with_decimals(self):
schema = {"items": {"type": "number"}}
Validator = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
"number",
lambda checker, thing: isinstance(
thing, (int, float, Decimal),
) and not isinstance(thing, bool),
),
)
validator = Validator(schema)
validator.validate([1, 1.1, Decimal(1) / Decimal(8)])
invalid = ["foo", {}, [], True, None]
self.assertEqual(
[error.instance for error in validator.iter_errors(invalid)],
invalid,
)
def test_it_returns_true_for_formats_it_does_not_know_about(self):
validator = self.Validator(
{"format": "carrot"}, format_checker=FormatChecker(),
)
validator.validate("bugs")
def test_it_does_not_validate_formats_by_default(self):
validator = self.Validator({})
self.assertIsNone(validator.format_checker)
def test_it_validates_formats_if_a_checker_is_provided(self):
checker = FormatChecker()
bad = ValueError("Bad!")
@checker.checks("foo", raises=ValueError)
def check(value):
if value == "good":
return True
elif value == "bad":
raise bad
else: # pragma: no cover
self.fail(f"What is {value}? [Baby Don't Hurt Me]")
validator = self.Validator(
{"format": "foo"}, format_checker=checker,
)
validator.validate("good")
with self.assertRaises(exceptions.ValidationError) as cm:
validator.validate("bad")
# Make sure original cause is attached
self.assertIs(cm.exception.cause, bad)
def test_non_string_custom_type(self):
non_string_type = object()
schema = {"type": [non_string_type]}
Crazy = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
non_string_type,
lambda checker, thing: isinstance(thing, int),
),
)
Crazy(schema).validate(15)
def test_it_properly_formats_tuples_in_errors(self):
"""
A tuple instance properly formats validation errors for uniqueItems.
See #224
"""
TupleValidator = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine(
"array",
lambda checker, thing: isinstance(thing, tuple),
),
)
with self.assertRaises(exceptions.ValidationError) as e:
TupleValidator({"uniqueItems": True}).validate((1, 1))
self.assertIn("(1, 1) has non-unique elements", str(e.exception))
def test_check_redefined_sequence(self):
"""
Allow array to validate against another defined sequence type
"""
schema = {"type": "array", "uniqueItems": True}
MyMapping = namedtuple("MyMapping", "a, b")
Validator = validators.extend(
self.Validator,
type_checker=self.Validator.TYPE_CHECKER.redefine_many(
{
"array": lambda checker, thing: isinstance(
thing, (list, deque),
),
"object": lambda checker, thing: isinstance(
thing, (dict, MyMapping),
),
},
),
)
validator = Validator(schema)
valid_instances = [
deque(["a", None, "1", "", True]),
deque([[False], [0]]),
[deque([False]), deque([0])],
[[deque([False])], [deque([0])]],
[[[[[deque([False])]]]], [[[[deque([0])]]]]],
[deque([deque([False])]), deque([deque([0])])],
[MyMapping("a", 0), MyMapping("a", False)],
[
MyMapping("a", [deque([0])]),
MyMapping("a", [deque([False])]),
],
[
MyMapping("a", [MyMapping("a", deque([0]))]),
MyMapping("a", [MyMapping("a", deque([False]))]),
],
[deque(deque(deque([False]))), deque(deque(deque([0])))],
]
for instance in valid_instances:
validator.validate(instance)
invalid_instances = [
deque(["a", "b", "a"]),
deque([[False], [False]]),
[deque([False]), deque([False])],
[[deque([False])], [deque([False])]],
[[[[[deque([False])]]]], [[[[deque([False])]]]]],
[deque([deque([False])]), deque([deque([False])])],
[MyMapping("a", False), MyMapping("a", False)],
[
MyMapping("a", [deque([False])]),
MyMapping("a", [deque([False])]),
],
[
MyMapping("a", [MyMapping("a", deque([False]))]),
MyMapping("a", [MyMapping("a", deque([False]))]),
],
[deque(deque(deque([False]))), deque(deque(deque([False])))],
]
for instance in invalid_instances:
with self.assertRaises(exceptions.ValidationError):
validator.validate(instance)
def test_it_creates_a_ref_resolver_if_not_provided(self):
with self.assertWarns(DeprecationWarning):
resolver = self.Validator({}).resolver
self.assertIsInstance(resolver, validators._RefResolver)
def test_it_upconverts_from_deprecated_RefResolvers(self):
ref, schema = "someCoolRef", {"type": "integer"}
resolver = validators._RefResolver("", {}, store={ref: schema})
validator = self.Validator({"$ref": ref}, resolver=resolver)
with self.assertRaises(exceptions.ValidationError):
validator.validate(None)
def test_it_upconverts_from_yet_older_deprecated_legacy_RefResolvers(self):
"""
Legacy RefResolvers support only the context manager form of
resolution.
"""
class LegacyRefResolver:
@contextmanager
def resolving(this, ref):
self.assertEqual(ref, "the ref")
yield {"type": "integer"}
resolver = LegacyRefResolver()
schema = {"$ref": "the ref"}
with self.assertRaises(exceptions.ValidationError):
self.Validator(schema, resolver=resolver).validate(None)
| ValidatorTestMixin |
python | getsentry__sentry | src/sentry/integrations/example/integration.py | {
"start": 2667,
"end": 7214
} | class ____(RepositoryIntegration, SourceCodeIssueIntegration, IssueSyncIntegration):
comment_key = "sync_comments"
outbound_status_key = "sync_status_outbound"
inbound_status_key = "sync_status_inbound"
outbound_assignee_key = "sync_assignee_outbound"
inbound_assignee_key = "sync_assignee_inbound"
@property
def integration_name(self) -> str:
return "example"
def get_client(self):
pass
def get_issue_url(self, key) -> str:
return f"https://example/issues/{key}"
def create_comment(self, issue_id, user_id, group_note):
user = user_service.get_user(user_id)
assert user is not None
attribution = f"{user.name} wrote:\n\n"
return {
"id": "123456789",
"text": "{}<blockquote>{}</blockquote>".format(attribution, group_note.data["text"]),
}
def get_persisted_default_config_fields(self) -> list[str]:
return ["project", "issueType"]
def get_persisted_user_default_config_fields(self) -> list[str]:
return ["assignedTo", "reportedBy"]
def get_create_issue_config(self, group, user, **kwargs):
kwargs["link_referrer"] = "example_integration"
fields = super().get_create_issue_config(group, user, **kwargs)
default = self.get_project_defaults(group.project_id)
example_project_field = self.generate_example_project_field(default)
return fields + [example_project_field]
def generate_example_project_field(self, default_fields):
project_field = {
"name": "project",
"label": "Project",
"choices": [("1", "Project 1"), ("2", "Project 2")],
"type": "select",
}
default_project = default_fields.get("project")
if default_project is not None:
project_field["default"] = default_project
return project_field
def get_link_issue_config(self, group, **kwargs):
fields = super().get_link_issue_config(group, **kwargs)
default = self.get_project_defaults(group.project_id)
example_project_field = self.generate_example_project_field(default)
return fields + [example_project_field]
def create_issue(self, data, **kwargs):
if "assignee" not in data:
raise IntegrationError("Assignee is required")
return {
"key": "APP-123",
"title": "This is a test external issue title",
"description": "This is a test external issue description",
}
def get_issue(self, issue_id, **kwargs):
return {
"key": issue_id,
"title": "This is a test external issue title",
"description": "This is a test external issue description",
}
def get_repositories(
self, query: str | None = None, page_number_limit: int | None = None
) -> list[dict[str, Any]]:
return [{"name": "repo", "identifier": "user/repo"}]
def get_unmigratable_repositories(self):
return []
def sync_assignee_outbound(
self,
external_issue: ExternalIssue,
user: RpcUser | None,
assign: bool = True,
**kwargs: Any,
) -> None:
pass
def sync_status_outbound(
self, external_issue: ExternalIssue, is_resolved: bool, project_id: int
) -> None:
pass
def get_resolve_sync_action(self, data: Mapping[str, Any]) -> ResolveSyncAction:
category = data["status"]["category"]
return ResolveSyncAction.from_resolve_unresolve(
should_resolve=category == "done",
should_unresolve=category != "done",
)
def get_issue_display_name(self, external_issue) -> str:
return f"display name: {external_issue.key}"
def get_stacktrace_link(
self, repo: Repository, filepath: str, default: str, version: str | None
) -> str | None:
pass
def format_source_url(self, repo: Repository, filepath: str, branch: str | None) -> str:
return f"https://example.com/{repo.name}/blob/{branch}/{filepath}"
def source_url_matches(self, url: str) -> bool:
return True
def extract_branch_from_source_url(self, repo: Repository, url: str) -> str:
return ""
def extract_source_path_from_source_url(self, repo: Repository, url: str) -> str:
return ""
def has_repo_access(self, repo: RpcRepository) -> bool:
return False
def search_issues(self, query: str | None, **kwargs):
return []
| ExampleIntegration |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/property_subclasses/my_models.py | {
"start": 642,
"end": 1039
} | class ____(ndb.StringProperty):
def _validate(self, value):
if not isinstance(value, (int, long)):
raise TypeError("expected an integer, got %s" % repr(value))
def _to_base_type(self, value):
return str(value) # Doesn't matter if it's an int or a long
def _from_base_type(self, value):
return long(value) # Always return a long
| LongIntegerProperty |
python | sympy__sympy | sympy/stats/random_matrix.py | {
"start": 109,
"end": 1028
} | class ____(PSpace):
"""
Represents probability space for
random matrices. It contains the mechanics
for handling the API calls for random matrices.
"""
def __new__(cls, sym, model=None):
sym = _symbol_converter(sym)
if model:
return Basic.__new__(cls, sym, model)
else:
return Basic.__new__(cls, sym)
@property
def model(self):
try:
return self.args[1]
except IndexError:
return None
def compute_density(self, expr, *args):
rms = expr.atoms(RandomMatrixSymbol)
if len(rms) > 2 or (not isinstance(expr, RandomMatrixSymbol)):
raise NotImplementedError("Currently, no algorithm has been "
"implemented to handle general expressions containing "
"multiple random matrices.")
return self.model.density(expr)
| RandomMatrixPSpace |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/decoders.py | {
"start": 141,
"end": 793
} | class ____(nn.Module):
def __init__(self, stream_names: List[str], input_size: int, output_size: int = 1):
super().__init__()
self.stream_names = stream_names
_value_heads = {}
for name in stream_names:
value = linear_layer(input_size, output_size)
_value_heads[name] = value
self.value_heads = nn.ModuleDict(_value_heads)
def forward(self, hidden: torch.Tensor) -> Dict[str, torch.Tensor]:
value_outputs = {}
for stream_name, head in self.value_heads.items():
value_outputs[stream_name] = head(hidden).squeeze(-1)
return value_outputs
| ValueHeads |
python | prabhupant__python-ds | data_structures/bst/trim_bst.py | {
"start": 79,
"end": 469
} | class ____():
def __init__(self, val):
self.val = val
self.right = None
self.left = None
def trim(root, L, R):
if not root:
return None
if root.val > R:
return trim(root.left, L, R)
if root.val < L:
return trim(root.right, L, R)
root.left = trim(root.left, L, R)
root.right = trim(root.right, L, R)
return root
| Node |
python | huggingface__transformers | src/transformers/models/clvp/number_normalizer.py | {
"start": 813,
"end": 8932
} | class ____:
def __init__(self):
# List of (regular expression, replacement) pairs for abbreviations:
self._abbreviations = [
(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
for x in [
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("maj", "major"),
("gen", "general"),
("drs", "doctors"),
("rev", "reverend"),
("lt", "lieutenant"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("esq", "esquire"),
("ltd", "limited"),
("col", "colonel"),
("ft", "fort"),
]
]
self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
self.teens = [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
def number_to_words(self, num: int) -> str:
"""
Converts numbers(`int`) to words(`str`).
Please note that it only supports upto - "'nine hundred ninety-nine quadrillion, nine hundred ninety-nine
trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine
thousand, nine hundred ninety-nine'" or `number_to_words(999_999_999_999_999_999)`.
"""
if num == 0:
return "zero"
elif num < 0:
return "minus " + self.number_to_words(abs(num))
elif num < 10:
return self.ones[num]
elif num < 20:
return self.teens[num - 10]
elif num < 100:
return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
elif num < 1000:
return (
self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
)
elif num < 1_000_000:
return (
self.number_to_words(num // 1000)
+ " thousand"
+ (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
)
elif num < 1_000_000_000:
return (
self.number_to_words(num // 1_000_000)
+ " million"
+ (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
)
elif num < 1_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000)
+ " billion"
+ (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000)
+ " trillion"
+ (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000_000)
+ " quadrillion"
+ (
", " + self.number_to_words(num % 1_000_000_000_000_000)
if num % 1_000_000_000_000_000 != 0
else ""
)
)
else:
return "number out of range"
def convert_to_ascii(self, text: str) -> str:
"""
Converts unicode to ascii
"""
return text.encode("ascii", "ignore").decode("utf-8")
def _expand_dollars(self, m: str) -> str:
"""
This method is used to expand numerical dollar values into spoken words.
"""
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = "dollar" if dollars == 1 else "dollars"
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = "dollar" if dollars == 1 else "dollars"
return "%s %s" % (dollars, dollar_unit)
elif cents:
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s" % (cents, cent_unit)
else:
return "zero dollars"
def _remove_commas(self, m: str) -> str:
"""
This method is used to remove commas from sentences.
"""
return m.group(1).replace(",", "")
def _expand_decimal_point(self, m: str) -> str:
"""
This method is used to expand '.' into spoken word ' point '.
"""
return m.group(1).replace(".", " point ")
def _expand_ordinal(self, num: str) -> str:
"""
This method is used to expand ordinals such as '1st', '2nd' into spoken words.
"""
ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
num = int(num.group(0)[:-2])
if 10 <= num % 100 and num % 100 <= 20:
suffix = "th"
else:
suffix = ordinal_suffixes.get(num % 10, "th")
return self.number_to_words(num) + suffix
def _expand_number(self, m: str) -> str:
"""
This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
link :
https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
"""
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return "two thousand"
elif num > 2000 and num < 2010:
return "two thousand " + self.number_to_words(num % 100)
elif num % 100 == 0:
return self.number_to_words(num // 100) + " hundred"
else:
return self.number_to_words(num)
else:
return self.number_to_words(num)
def normalize_numbers(self, text: str) -> str:
"""
This method is used to normalize numbers within a text such as converting the numbers to words, removing
commas, etc.
"""
text = re.sub(r"([0-9][0-9,]+[0-9])", self._remove_commas, text)
text = re.sub(r"£([0-9,]*[0-9])", r"\1 pounds", text)
text = re.sub(r"\$([0-9.,]*[0-9])", self._expand_dollars, text)
text = re.sub(r"([0-9]++\.[0-9]+)", self._expand_decimal_point, text)
text = re.sub(r"[0-9]++(st|nd|rd|th)", self._expand_ordinal, text)
text = re.sub(r"[0-9]+", self._expand_number, text)
return text
def expand_abbreviations(self, text: str) -> str:
"""
Expands the abbreviate words.
"""
for regex, replacement in self._abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(self, text: str) -> str:
"""
Removes multiple whitespaces
"""
return re.sub(re.compile(r"\s+"), " ", text)
def __call__(self, text):
"""
Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
abbreviations
"""
text = self.convert_to_ascii(text)
text = text.lower()
text = self.normalize_numbers(text)
text = self.expand_abbreviations(text)
text = self.collapse_whitespace(text)
text = text.replace('"', "")
return text
| EnglishNormalizer |
python | doocs__leetcode | solution/2100-2199/2148.Count Elements With Strictly Smaller and Greater Elements/Solution.py | {
"start": 0,
"end": 153
} | class ____:
def countElements(self, nums: List[int]) -> int:
mi, mx = min(nums), max(nums)
return sum(mi < x < mx for x in nums)
| Solution |
python | qdrant__qdrant-client | tools/async_client_generator/transformers/fastembed/call_transformer.py | {
"start": 48,
"end": 569
} | class ____(ast.NodeTransformer):
def __init__(self, async_methods: Optional[list[str]] = None):
self.async_methods = async_methods if async_methods is not None else []
def visit_Call(self, node: ast.Call) -> Union[ast.AST, ast.Await]:
if isinstance(node.func, ast.Attribute):
if isinstance(node.func.value, ast.Name):
if node.func.attr in self.async_methods:
return ast.Await(value=node)
return self.generic_visit(node)
| FastembedCallTransformer |
python | TheAlgorithms__Python | other/graham_scan.py | {
"start": 495,
"end": 5430
} | class ____(Enum):
left = 1
straight = 2
right = 3
def __repr__(self):
return f"{self.__class__.__name__}.{self.name}"
def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float:
"""Return the angle toward to point from (minx, miny)
:param point: The target point
minx: The starting point's x
miny: The starting point's y
:return: the angle
Examples:
>>> angle_comparer((1,1), 0, 0)
45.0
>>> angle_comparer((100,1), 10, 10)
-5.710593137499642
>>> angle_comparer((5,5), 2, 3)
33.690067525979785
"""
# sort the points accorgind to the angle from the lowest and the most left point
x, y = point
return degrees(atan2(y - miny, x - minx))
def check_direction(
starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int]
) -> Direction:
"""Return the direction toward to the line from via to target from starting
:param starting: The starting point
via: The via point
target: The target point
:return: the Direction
Examples:
>>> check_direction((1,1), (2,2), (3,3))
Direction.straight
>>> check_direction((60,1), (-50,199), (30,2))
Direction.left
>>> check_direction((0,0), (5,5), (10,0))
Direction.right
"""
x0, y0 = starting
x1, y1 = via
x2, y2 = target
via_angle = degrees(atan2(y1 - y0, x1 - x0))
via_angle %= 360
target_angle = degrees(atan2(y2 - y0, x2 - x0))
target_angle %= 360
# t-
# \ \
# \ v
# \|
# s
# via_angle is always lower than target_angle, if direction is left.
# If they are same, it means they are on a same line of convex hull.
if target_angle > via_angle:
return Direction.left
elif target_angle == via_angle:
return Direction.straight
else:
return Direction.right
def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]:
"""Pure implementation of graham scan algorithm in Python
:param points: The unique points on coordinates.
:return: The points on convex hell.
Examples:
>>> graham_scan([(9, 6), (3, 1), (0, 0), (5, 5), (5, 2), (7, 0), (3, 3), (1, 4)])
[(0, 0), (7, 0), (9, 6), (5, 5), (1, 4)]
>>> graham_scan([(0, 0), (1, 0), (1, 1), (0, 1)])
[(0, 0), (1, 0), (1, 1), (0, 1)]
>>> graham_scan([(0, 0), (1, 1), (2, 2), (3, 3), (-1, 2)])
[(0, 0), (1, 1), (2, 2), (3, 3), (-1, 2)]
>>> graham_scan([(-100, 20), (99, 3), (1, 10000001), (5133186, -25), (-66, -4)])
[(5133186, -25), (1, 10000001), (-100, 20), (-66, -4)]
"""
if len(points) <= 2:
# There is no convex hull
raise ValueError("graham_scan: argument must contain more than 3 points.")
if len(points) == 3:
return points
# find the lowest and the most left point
minidx = 0
miny, minx = maxsize, maxsize
for i, point in enumerate(points):
x = point[0]
y = point[1]
if y < miny:
miny = y
minx = x
minidx = i
if y == miny and x < minx:
minx = x
minidx = i
# remove the lowest and the most left point from points for preparing for sort
points.pop(minidx)
sorted_points = sorted(points, key=lambda point: angle_comparer(point, minx, miny))
# This insert actually costs complexity,
# and you should instead add (minx, miny) into stack later.
# I'm using insert just for easy understanding.
sorted_points.insert(0, (minx, miny))
stack: deque[tuple[int, int]] = deque()
stack.append(sorted_points[0])
stack.append(sorted_points[1])
stack.append(sorted_points[2])
# The first 3 points lines are towards the left because we sort them by their angle
# from minx, miny.
current_direction = Direction.left
for i in range(3, len(sorted_points)):
while True:
starting = stack[-2]
via = stack[-1]
target = sorted_points[i]
next_direction = check_direction(starting, via, target)
if next_direction == Direction.left:
current_direction = Direction.left
break
if next_direction == Direction.straight:
if current_direction == Direction.left:
# We keep current_direction as left.
# Because if the straight line keeps as straight,
# we want to know if this straight line is towards left.
break
elif current_direction == Direction.right:
# If the straight line is towards right,
# every previous points on that straight line is not convex hull.
stack.pop()
if next_direction == Direction.right:
stack.pop()
stack.append(sorted_points[i])
return list(stack)
| Direction |
python | doocs__leetcode | solution/2700-2799/2749.Minimum Operations to Make the Integer Zero/Solution.py | {
"start": 0,
"end": 265
} | class ____:
def makeTheIntegerZero(self, num1: int, num2: int) -> int:
for k in count(1):
x = num1 - k * num2
if x < 0:
break
if x.bit_count() <= k <= x:
return k
return -1
| Solution |
python | explosion__spaCy | spacy/lang/sk/__init__.py | {
"start": 216,
"end": 309
} | class ____(Language):
lang = "sk"
Defaults = SlovakDefaults
__all__ = ["Slovak"]
| Slovak |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/events.py | {
"start": 11095,
"end": 11561
} | class ____(PrefectBaseModel):
"""A single page of events returned from the API, with an optional link to the
next page of results"""
events: List[ReceivedEvent] = Field(
..., description="The Events matching the query"
)
total: int = Field(..., description="The total number of matching Events")
next_page: Optional[AnyHttpUrl] = Field(
..., description="The URL for the next page of results, if there are more"
)
| EventPage |
python | kamyu104__LeetCode-Solutions | Python/single-number-ii.py | {
"start": 627,
"end": 858
} | class ____(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return (collections.Counter(list(set(nums)) * 3) - collections.Counter(nums)).keys()[0]
| Solution3 |
python | openai__openai-python | src/openai/cli/_api/fine_tuning/jobs.py | {
"start": 3106,
"end": 3312
} | class ____(BaseModel):
model: str
training_file: str
hyperparameters: Omittable[str] = omit
suffix: Omittable[str] = omit
validation_file: Omittable[str] = omit
| CLIFineTuningJobsCreateArgs |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/connectors/test_callback_connector.py | {
"start": 9686,
"end": 12336
} | class ____(Callback):
"""A callback in another library that gets registered through entry points."""
pass
def test_configure_external_callbacks():
"""Test that the connector collects Callback instances from factories registered through entry points."""
def factory_no_callback():
return []
def factory_one_callback():
return ExternalCallback()
def factory_one_callback_list():
return [ExternalCallback()]
def factory_multiple_callbacks_list():
return [ExternalCallback(), ExternalCallback()]
with _make_entry_point_query_mock(factory_no_callback):
trainer = Trainer(enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False)
assert trainer.callbacks == []
with _make_entry_point_query_mock(factory_one_callback):
trainer = Trainer(enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False)
assert isinstance(trainer.callbacks[0], ExternalCallback)
with _make_entry_point_query_mock(factory_one_callback_list):
trainer = Trainer(enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False)
assert isinstance(trainer.callbacks[0], ExternalCallback)
with _make_entry_point_query_mock(factory_multiple_callbacks_list):
trainer = Trainer(enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False)
assert isinstance(trainer.callbacks[0], ExternalCallback)
assert isinstance(trainer.callbacks[1], ExternalCallback)
@contextlib.contextmanager
def _make_entry_point_query_mock(callback_factory):
query_mock = MagicMock()
entry_point = Mock()
entry_point.name = "mocked"
entry_point.load.return_value = callback_factory
if _PYTHON_GREATER_EQUAL_3_10_0:
query_mock.return_value = [entry_point]
else:
query_mock().get.return_value = [entry_point]
with mock.patch("lightning.fabric.utilities.registry.entry_points", query_mock):
yield
def test_validate_unique_callback_state_key():
"""Test that we raise an error if the state keys collide, leading to missing state in the checkpoint."""
class MockCallback(Callback):
@property
def state_key(self):
return "same_key"
def state_dict(self):
# pretend these callbacks are stateful by overriding the `state_dict` hook
return {"state": 1}
with pytest.raises(RuntimeError, match="Found more than one stateful callback of type `MockCallback`"):
Trainer(callbacks=[MockCallback(), MockCallback()])
# Test with single stateful callback
| ExternalCallback |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 155352,
"end": 161204
} | class ____(Response):
"""
Response of tasks.delete endpoint.
:param deleted: Indicates whether the task was deleted
:type deleted: bool
:param updated_children: Number of child tasks whose parent property was
updated
:type updated_children: int
:param updated_models: Number of models whose task property was updated
:type updated_models: int
:param frames: Response from frames.rollback
:type frames: dict
:param events: Response from events.delete_for_task
:type events: dict
:param urls: The urls of the files that were uploaded by this task. Returned if
the 'return_file_urls' was set to 'true'
:type urls: TaskUrls
"""
_service = "tasks"
_action = "delete"
_version = "2.13"
_schema = {
"definitions": {
"task_urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"properties": {
"deleted": {
"description": "Indicates whether the task was deleted",
"type": ["boolean", "null"],
},
"events": {
"additionalProperties": True,
"description": "Response from events.delete_for_task",
"type": ["object", "null"],
},
"frames": {
"additionalProperties": True,
"description": "Response from frames.rollback",
"type": ["object", "null"],
},
"updated_children": {
"description": "Number of child tasks whose parent property was updated",
"type": ["integer", "null"],
},
"updated_models": {
"description": "Number of models whose task property was updated",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by this task. Returned if the 'return_file_urls' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
deleted: Optional[bool] = None,
updated_children: Optional[int] = None,
updated_models: Optional[int] = None,
frames: Optional[dict] = None,
events: Optional[dict] = None,
urls: Any = None,
**kwargs: Any
) -> None:
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.updated_children = updated_children
self.updated_models = updated_models
self.frames = frames
self.events = events
self.urls = urls
@schema_property("deleted")
def deleted(self) -> Optional[bool]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[bool]) -> None:
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
@schema_property("updated_children")
def updated_children(self) -> Optional[int]:
return self._property_updated_children
@updated_children.setter
def updated_children(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_children = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_children", six.integer_types)
self._property_updated_children = value
@schema_property("updated_models")
def updated_models(self) -> Optional[int]:
return self._property_updated_models
@updated_models.setter
def updated_models(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_models", six.integer_types)
self._property_updated_models = value
@schema_property("frames")
def frames(self) -> Optional[dict]:
return self._property_frames
@frames.setter
def frames(self, value: Optional[dict]) -> None:
if value is None:
self._property_frames = None
return
self.assert_isinstance(value, "frames", (dict,))
self._property_frames = value
@schema_property("events")
def events(self) -> Optional[dict]:
return self._property_events
@events.setter
def events(self, value: Optional[dict]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (dict,))
self._property_events = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = TaskUrls.from_dict(value)
else:
self.assert_isinstance(value, "urls", TaskUrls)
self._property_urls = value
| DeleteResponse |
python | ApeWorX__ape | src/ape_test/provider.py | {
"start": 21631,
"end": 22623
} | class ____(TransactionTrace):
@cached_property
def return_value(self) -> Any:
# perf: skip trying anything else, because eth-tester doesn't
# yet implement any tracing RPCs.
init_kwargs = self._get_tx_calltree_kwargs()
receipt = self.chain_manager.get_receipt(self.transaction_hash)
init_kwargs["gas_cost"] = receipt.gas_used
if not (abi := self.root_method_abi):
return (None,)
num_return = len(self.root_method_abi.outputs)
# Figure out the 'returndata' using 'eth_call' RPC.
tx = receipt.transaction.model_copy(update={"nonce": None})
try:
returndata = self.provider.send_call(tx, block_id=receipt.block_number)
except ContractLogicError:
# Unable to get the return value because even as a call, it fails.
return tuple([None for _ in range(num_return)])
return self._ecosystem.decode_returndata(abi, returndata)
| EthTesterTransactionTrace |
python | google__jax | jax/experimental/mosaic/gpu/utils.py | {
"start": 42398,
"end": 46053
} | class ____:
ptr: ir.Value
def signal(
self,
value: ir.Value | int,
predicate: ir.Value | None = None,
relaxed: bool = False,
):
i32 = ir.IntegerType.get_signless(32)
if not isinstance(value, ir.Value):
value = c(value, i32)
elif value.type != i32:
raise ValueError(f"Expected a i32 value, got {value.type}")
if predicate is None:
predicate = single_thread_predicate(ThreadSubset.WARPGROUP)
semantics = "relaxed" if relaxed else "release"
llvm.inline_asm(
i32,
[self.ptr, value, predicate],
f"@$3 atom.add.{semantics}.sys.global.u32 $0, [$1], $2;",
"=r,l,r,b",
has_side_effects=True,
)
@staticmethod
def signal_multimem(ptr, value, predicate: ir.Value | None = None):
i32 = ir.IntegerType.get_signless(32)
if not isinstance(value, ir.Value):
value = c(value, i32)
elif value.type != i32:
raise ValueError(f"Expected a i32 value, got {value.type}")
if predicate is None:
predicate = single_thread_predicate(ThreadSubset.WARPGROUP)
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[ptr, value, predicate],
"""{
@$2 multimem.red.release.sys.global.add.u32 [$0], $1;
fence.proxy.alias;
}
""",
"l,r,b",
has_side_effects=True,
)
def wait(
self,
value: ir.Value | int = 1,
*,
decrement: bool = True,
scope: ThreadSubset = ThreadSubset.WARPGROUP,
):
i32 = ir.IntegerType.get_signless(32)
if not isinstance(value, ir.Value):
value = c(value, i32)
elif value.type != i32:
raise ValueError(f"Expected a i32 value, got {value.type}")
with single_thread(scope=scope):
# Create the while loop for busy waiting
while_op = scf.WhileOp([i32], [value])
before_block = while_op.before.blocks.append(i32)
with ir.InsertionPoint.at_block_begin(before_block):
[expected_in_memory] = before_block.arguments
if decrement:
new_val = arith.subi(expected_in_memory, value)
in_memory = llvm.inline_asm(
i32,
[self.ptr, expected_in_memory, new_val],
"atom.acquire.sys.global.cas.b32 $0, [$1], $2, $3;",
"=r,l,r,r",
has_side_effects=True,
)
ne_pred = arith.CmpIPredicate.ne
comparison = arith.cmpi(ne_pred, in_memory, expected_in_memory)
new_expected_in_memory = arith.maxui(in_memory, value)
else:
in_memory = llvm.inline_asm(
i32,
[self.ptr],
"ld.relaxed.sys.global.b32 $0, [$1];",
"=r,l",
has_side_effects=True,
)
lt_pred = arith.CmpIPredicate.ult
comparison = arith.cmpi(lt_pred, in_memory, value)
new_expected_in_memory = expected_in_memory
scf.condition(comparison, [new_expected_in_memory])
after_block = while_op.after.blocks.append(i32)
with ir.InsertionPoint.at_block_begin(after_block):
scf.yield_(after_block.arguments)
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[],
"fence.acquire.sys;",
"",
has_side_effects=True,
)
if scope == ThreadSubset.WARPGROUP:
warpgroup_barrier()
elif scope == ThreadSubset.WARP:
warp_barrier()
else:
raise ValueError(f"Unsupported scope: {scope}")
def fence_release_sys():
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[],
"fence.release.sys;",
"",
has_side_effects=True,
)
| SemaphoreRef |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_doc_integration_avatar.py | {
"start": 2938,
"end": 6969
} | class ____(DocIntegrationAvatarTest):
method = "PUT"
def test_user_upload_avatar(self) -> None:
"""
Tests that regular users cannot upload doc integration avatars
"""
self.login_as(user=self.user)
self.get_error_response(self.published_doc.slug, status_code=status.HTTP_403_FORBIDDEN)
self.get_error_response(self.draft_doc.slug, status_code=status.HTTP_403_FORBIDDEN)
# TODO(schew2381): Change test to check that superusers cannot upload doc integration avatars
def test_superuser_upload_avatar(self) -> None:
"""
Tests that superusers can upload avatars
"""
with self.options(
{
"filestore.control.backend": options_store.get("filestore.backend"),
"filestore.control.options": options_store.get("filestore.options"),
}
):
self.login_as(user=self.superuser, superuser=True)
with (
assume_test_silo_mode(SiloMode.CONTROL),
override_settings(SILO_MODE=SiloMode.CONTROL),
):
for doc in [self.published_doc, self.draft_doc]:
prev_avatar = doc.avatar.get()
response = self.get_success_response(
doc.slug, status_code=status.HTTP_200_OK, **self.avatar_payload
)
assert serialize(doc) == response.data
assert serialize(doc.avatar.get()) == response.data["avatar"]
assert serialize(prev_avatar) != response.data["avatar"]
assert prev_avatar.control_file_id != doc.avatar.get().control_file_id
def test_staff_upload_avatar(self) -> None:
"""
Tests that superusers can upload avatars
"""
with self.options(
{
"filestore.control.backend": options_store.get("filestore.backend"),
"filestore.control.options": options_store.get("filestore.options"),
}
):
self.login_as(user=self.staff_user, staff=True)
with (
assume_test_silo_mode(SiloMode.CONTROL),
override_settings(SILO_MODE=SiloMode.CONTROL),
):
for doc in [self.published_doc, self.draft_doc]:
prev_avatar = doc.avatar.get()
response = self.get_success_response(
doc.slug, status_code=status.HTTP_200_OK, **self.avatar_payload
)
assert serialize(doc) == response.data
assert serialize(doc.avatar.get()) == response.data["avatar"]
assert serialize(prev_avatar) != response.data["avatar"]
assert prev_avatar.control_file_id != doc.avatar.get().control_file_id
def test_upload_avatar_payload_structure(self) -> None:
"""
Tests that errors are thrown on malformed upload payloads
"""
self.login_as(user=self.staff_user, staff=True)
# Structured as 'error-description' : (malformed-payload, erroring-fields)
invalid_payloads: dict[str, tuple[dict[str, Any], list[str]]] = {
"empty_payload": ({}, ["avatar_photo", "avatar_type"]),
"missing_avatar_photo": (
{"avatar_type": self.avatar_payload["avatar_type"]},
["avatar_photo"],
),
"missing_avatar_type": (
{"avatar_photo": self.avatar_payload["avatar_photo"]},
["avatar_type"],
),
"invalid_avatar_type": ({**self.avatar_payload, "avatar_type": 1}, ["avatar_type"]),
}
for payload, fields in invalid_payloads.values():
response = self.get_error_response(
self.draft_doc.slug, status_code=status.HTTP_400_BAD_REQUEST, **payload
)
for field in fields:
assert field in response.data.keys()
| PutDocIntegrationAvatarTest |
python | huggingface__transformers | src/transformers/models/gemma3n/modular_gemma3n.py | {
"start": 74627,
"end": 74707
} | class ____(Gemma3TextScaledWordEmbedding):
pass
| Gemma3nTextScaledWordEmbedding |
python | astral-sh__uv | crates/uv-python/fetch-download-metadata.py | {
"start": 3518,
"end": 4173
} | class ____(NamedTuple):
major: int
minor: int
patch: int
prerelease: str = ""
@classmethod
def from_str(cls, version: str) -> Self:
major, minor, patch = version.split(".", 3)
prerelease = ""
for prerelease_kind in ("a", "b", "rc"):
parts = patch.split(prerelease_kind, 1)
if len(parts) == 2:
patch = parts[0]
prerelease = prerelease_kind + parts[1]
break
return cls(int(major), int(minor), int(patch), prerelease)
def __str__(self) -> str:
return f"{self.major}.{self.minor}.{self.patch}{self.prerelease}"
| Version |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 7370,
"end": 7931
} | class ____(_AbstractUsedNamesFilter):
def get(self, name):
try:
names = self._used_names[name]
except KeyError:
return []
return self._convert_names(self._filter(names))
@to_list
def _filter(self, names):
for name in names:
if name.parent.type == 'global_stmt':
yield name
def values(self):
return self._convert_names(
name for name_list in self._used_names.values()
for name in self._filter(name_list)
)
| GlobalNameFilter |
python | sympy__sympy | sympy/printing/glsl.py | {
"start": 629,
"end": 20310
} | class ____(CodePrinter):
"""
Rudimentary, generic GLSL printing tools.
Additional settings:
'use_operators': Boolean (should the printer use operators for +,-,*, or functions?)
"""
_not_supported: set[Basic] = set()
printmethod = "_glsl"
language = "GLSL"
_default_settings = dict(CodePrinter._default_settings, **{
'use_operators': True,
'zero': 0,
'mat_nested': False,
'mat_separator': ',\n',
'mat_transpose': False,
'array_type': 'float',
'glsl_types': True,
'precision': 9,
'user_functions': {},
'contract': True,
})
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
return "float {} = {};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [line.lstrip(' \t') for line in code]
increase = [int(any(map(line.endswith, inc_token))) for line in code]
decrease = [int(any(map(line.startswith, dec_token))) for line in code]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def _print_MatrixBase(self, mat):
mat_separator = self._settings['mat_separator']
mat_transpose = self._settings['mat_transpose']
column_vector = (mat.rows == 1) if mat_transpose else (mat.cols == 1)
A = mat.transpose() if mat_transpose != column_vector else mat
glsl_types = self._settings['glsl_types']
array_type = self._settings['array_type']
array_size = A.cols*A.rows
array_constructor = "{}[{}]".format(array_type, array_size)
if A.cols == 1:
return self._print(A[0])
if A.rows <= 4 and A.cols <= 4 and glsl_types:
if A.rows == 1:
return "vec{}{}".format(
A.cols, A.table(self,rowstart='(',rowend=')')
)
elif A.rows == A.cols:
return "mat{}({})".format(
A.rows, A.table(self,rowsep=', ',
rowstart='',rowend='')
)
else:
return "mat{}x{}({})".format(
A.cols, A.rows,
A.table(self,rowsep=', ',
rowstart='',rowend='')
)
elif S.One in A.shape:
return "{}({})".format(
array_constructor,
A.table(self,rowsep=mat_separator,rowstart='',rowend='')
)
elif not self._settings['mat_nested']:
return "{}(\n{}\n) /* a {}x{} matrix */".format(
array_constructor,
A.table(self,rowsep=mat_separator,rowstart='',rowend=''),
A.rows, A.cols
)
elif self._settings['mat_nested']:
return "{}[{}][{}](\n{}\n)".format(
array_type, A.rows, A.cols,
A.table(self,rowsep=mat_separator,rowstart='float[](',rowend=')')
)
def _print_SparseRepMatrix(self, mat):
# do not allow sparse matrices to be made dense
return self._print_not_supported(mat)
def _traverse_matrix_indices(self, mat):
mat_transpose = self._settings['mat_transpose']
if mat_transpose:
rows,cols = mat.shape
else:
cols,rows = mat.shape
return ((i, j) for i in range(cols) for j in range(rows))
def _print_MatrixElement(self, expr):
# print('begin _print_MatrixElement')
nest = self._settings['mat_nested']
glsl_types = self._settings['glsl_types']
mat_transpose = self._settings['mat_transpose']
if mat_transpose:
cols,rows = expr.parent.shape
i,j = expr.j,expr.i
else:
rows,cols = expr.parent.shape
i,j = expr.i,expr.j
pnt = self._print(expr.parent)
if glsl_types and ((rows <= 4 and cols <=4) or nest):
return "{}[{}][{}]".format(pnt, i, j)
else:
return "{}[{}]".format(pnt, i + j*rows)
def _print_list(self, expr):
l = ', '.join(self._print(item) for item in expr)
glsl_types = self._settings['glsl_types']
array_type = self._settings['array_type']
array_size = len(expr)
array_constructor = '{}[{}]'.format(array_type, array_size)
if array_size <= 4 and glsl_types:
return 'vec{}({})'.format(array_size, l)
else:
return '{}({})'.format(array_constructor, l)
_print_tuple = _print_list
_print_Tuple = _print_list
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (int %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
for i in indices:
# GLSL arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'varble': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_Function_with_args(self, func, func_args):
if func in self.known_functions:
cond_func = self.known_functions[func]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(func_args):
break
if func is not None:
try:
return func(*[self.parenthesize(item, 0) for item in func_args])
except TypeError:
return '{}({})'.format(func, self.stringify(func_args, ", "))
elif isinstance(func, Lambda):
# inlined function
return self._print(func(*func_args))
else:
return self._print_not_supported(func)
def _print_Piecewise(self, expr):
from sympy.codegen.ast import Assignment
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c),
self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "{}[{}]".format(
self._print(expr.base.label),
self._print(elem)
)
def _print_Pow(self, expr):
PREC = precedence(expr)
if equal_valued(expr.exp, -1):
return '1.0/%s' % (self.parenthesize(expr.base, PREC))
elif equal_valued(expr.exp, 0.5):
return 'sqrt(%s)' % self._print(expr.base)
else:
try:
e = self._print(float(expr.exp))
except TypeError:
e = self._print(expr.exp)
return self._print_Function_with_args('pow', (
self._print(expr.base),
e
))
def _print_int(self, expr):
return str(float(expr))
def _print_Rational(self, expr):
return "{}.0/{}.0".format(expr.p, expr.q)
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Add(self, expr, order=None):
if self._settings['use_operators']:
return CodePrinter._print_Add(self, expr, order=order)
terms = expr.as_ordered_terms()
def partition(p,l):
return reduce(lambda x, y: (x[0]+[y], x[1]) if p(y) else (x[0], x[1]+[y]), l, ([], []))
def add(a,b):
return self._print_Function_with_args('add', (a, b))
# return self.known_functions['add']+'(%s, %s)' % (a,b)
neg, pos = partition(lambda arg: arg.could_extract_minus_sign(), terms)
if pos:
s = pos = reduce(lambda a,b: add(a,b), (self._print(t) for t in pos))
else:
s = pos = self._print(self._settings['zero'])
if neg:
# sum the absolute values of the negative terms
neg = reduce(lambda a,b: add(a,b), (self._print(-n) for n in neg))
# then subtract them from the positive terms
s = self._print_Function_with_args('sub', (pos,neg))
# s = self.known_functions['sub']+'(%s, %s)' % (pos,neg)
return s
def _print_Mul(self, expr, **kwargs):
if self._settings['use_operators']:
return CodePrinter._print_Mul(self, expr, **kwargs)
terms = expr.as_ordered_factors()
def mul(a,b):
# return self.known_functions['mul']+'(%s, %s)' % (a,b)
return self._print_Function_with_args('mul', (a,b))
s = reduce(lambda a,b: mul(a,b), (self._print(t) for t in terms))
return s
def glsl_code(expr,assign_to=None,**settings):
"""Converts an expr to a string of GLSL code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used for naming the variable or variables
to which the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol`` or ``Indexed`` type object. In cases where ``expr``
would be printed as an array, a list of string or ``Symbol`` objects
can also be passed.
This is helpful in case of line-wrapping, or for expressions that
generate multi-line statements. It can also be used to spread an array-like
expression into multiple assignments.
use_operators: bool, optional
If set to False, then *,/,+,- operators will be replaced with functions
mul, add, and sub, which must be implemented by the user, e.g. for
implementing non-standard rings or emulated quad/octal precision.
[default=True]
glsl_types: bool, optional
Set this argument to ``False`` in order to avoid using the ``vec`` and ``mat``
types. The printer will instead use arrays (or nested arrays).
[default=True]
mat_nested: bool, optional
GLSL version 4.3 and above support nested arrays (arrays of arrays). Set this to ``True``
to render matrices as nested arrays.
[default=False]
mat_separator: str, optional
By default, matrices are rendered with newlines using this separator,
making them easier to read, but less compact. By removing the newline
this option can be used to make them more vertically compact.
[default=',\n']
mat_transpose: bool, optional
GLSL's matrix multiplication implementation assumes column-major indexing.
By default, this printer ignores that convention. Setting this option to
``True`` transposes all matrix output.
[default=False]
array_type: str, optional
The GLSL array constructor type.
[default='float']
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, js_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import glsl_code, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> glsl_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*pow(tau, 3.5)'
>>> glsl_code(sin(x), assign_to="float y")
'float y = sin(x);'
Various GLSL types are supported:
>>> from sympy import Matrix, glsl_code
>>> glsl_code(Matrix([1,2,3]))
'vec3(1, 2, 3)'
>>> glsl_code(Matrix([[1, 2],[3, 4]]))
'mat2(1, 2, 3, 4)'
Pass ``mat_transpose = True`` to switch to column-major indexing:
>>> glsl_code(Matrix([[1, 2],[3, 4]]), mat_transpose = True)
'mat2(1, 3, 2, 4)'
By default, larger matrices get collapsed into float arrays:
>>> print(glsl_code( Matrix([[1,2,3,4,5],[6,7,8,9,10]]) ))
float[10](
1, 2, 3, 4, 5,
6, 7, 8, 9, 10
) /* a 2x5 matrix */
The type of array constructor used to print GLSL arrays can be controlled
via the ``array_type`` parameter:
>>> glsl_code(Matrix([1,2,3,4,5]), array_type='int')
'int[5](1, 2, 3, 4, 5)'
Passing a list of strings or ``symbols`` to the ``assign_to`` parameter will yield
a multi-line assignment for each item in an array-like expression:
>>> x_struct_members = symbols('x.a x.b x.c x.d')
>>> print(glsl_code(Matrix([1,2,3,4]), assign_to=x_struct_members))
x.a = 1;
x.b = 2;
x.c = 3;
x.d = 4;
This could be useful in cases where it's desirable to modify members of a
GLSL ``Struct``. It could also be used to spread items from an array-like
expression into various miscellaneous assignments:
>>> misc_assignments = ('x[0]', 'x[1]', 'float y', 'float z')
>>> print(glsl_code(Matrix([1,2,3,4]), assign_to=misc_assignments))
x[0] = 1;
x[1] = 2;
float y = 3;
float z = 4;
Passing ``mat_nested = True`` instead prints out nested float arrays, which are
supported in GLSL 4.3 and above.
>>> mat = Matrix([
... [ 0, 1, 2],
... [ 3, 4, 5],
... [ 6, 7, 8],
... [ 9, 10, 11],
... [12, 13, 14]])
>>> print(glsl_code( mat, mat_nested = True ))
float[5][3](
float[]( 0, 1, 2),
float[]( 3, 4, 5),
float[]( 6, 7, 8),
float[]( 9, 10, 11),
float[](12, 13, 14)
)
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
js_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> glsl_code(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
If further control is needed, addition, subtraction, multiplication and
division operators can be replaced with ``add``, ``sub``, and ``mul``
functions. This is done by passing ``use_operators = False``:
>>> x,y,z = symbols('x,y,z')
>>> glsl_code(x*(y+z), use_operators = False)
'mul(x, add(y, z))'
>>> glsl_code(x*(y+z*(x-y)**z), use_operators = False)
'mul(x, add(y, mul(z, pow(sub(x, y), z))))'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(glsl_code(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> glsl_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(glsl_code(mat, A))
A[0][0] = pow(x, 2.0);
if (x > 0) {
A[1][0] = x + 1;
}
else {
A[1][0] = x;
}
A[2][0] = sin(x);
"""
return GLSLPrinter(settings).doprint(expr,assign_to)
def print_glsl(expr, **settings):
"""Prints the GLSL representation of the given expression.
See GLSLPrinter init function for settings.
"""
print(glsl_code(expr, **settings))
| GLSLPrinter |
python | mlflow__mlflow | mlflow/gemini/autolog.py | {
"start": 1690,
"end": 8973
} | class ____:
"""Context manager for handling MLflow spans in both sync and async contexts."""
def __init__(self, original, instance, args, kwargs):
self.original = original
self.instance = instance
self.inputs = construct_full_inputs(original, instance, *args, **kwargs)
# These attributes are set outside the constructor.
self.span = None
self.token = None
self.output = None
def __enter__(self):
return self._enter_impl()
def __exit__(self, exc_type, exc_val, exc_tb):
self._exit_impl(exc_type, exc_val, exc_tb)
async def __aenter__(self):
return self._enter_impl()
async def __aexit__(self, exc_type, exc_val, exc_tb):
self._exit_impl(exc_type, exc_val, exc_tb)
def _enter_impl(self):
config = AutoLoggingConfig.init(flavor_name=mlflow.gemini.FLAVOR_NAME)
if not config.log_traces:
return self
self.span = mlflow.start_span_no_context(
name=f"{self.instance.__class__.__name__}.{self.original.__name__}",
span_type=_get_span_type(self.original.__name__),
inputs=self.inputs,
attributes={SpanAttributeKey.MESSAGE_FORMAT: "gemini"},
)
if has_generativeai and isinstance(self.instance, generativeai.GenerativeModel):
_log_generativeai_tool_definition(self.instance, self.span)
if _is_genai_model_or_chat(self.instance):
_log_genai_tool_definition(self.instance, self.inputs, self.span)
# Attach the span to the current context. This is necessary because single Gemini
# SDK call might create multiple child spans.
self.token = set_span_in_context(self.span)
return self
def _exit_impl(self, exc_type, exc_val, exc_tb) -> None:
if not self.span:
return
# Detach span from the context at first. This must not be interrupted by any exception,
# otherwise the span context will leak and pollute other traces created next.
detach_span_from_context(self.token)
if exc_val:
self.span.record_exception(exc_val)
try:
if usage := _parse_usage(self.output):
self.span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
except Exception as e:
_logger.warning(
f"Failed to extract token usage for span {self.span.name}: {e}", exc_info=True
)
# need to convert the response of generate_content for better visualization
outputs = self.output.to_dict() if hasattr(self.output, "to_dict") else self.output
self.span.end(outputs=outputs)
def _is_genai_model_or_chat(instance) -> bool:
return has_genai and isinstance(
instance,
(
genai.models.Models,
genai.chats.Chat,
genai.models.AsyncModels,
genai.chats.AsyncChat,
),
)
def patched_module_call(original, *args, **kwargs):
"""
This method is used for patching standalone functions of the google.generativeai module.
This patch creates a span and set input and output of the original function to the span.
"""
config = AutoLoggingConfig.init(flavor_name=mlflow.gemini.FLAVOR_NAME)
if not config.log_traces:
return original(*args, **kwargs)
with mlflow.start_span(
name=f"{original.__name__}",
span_type=_get_span_type(original.__name__),
) as span:
inputs = _construct_full_inputs(original, *args, **kwargs)
span.set_inputs(inputs)
span.set_attribute(SpanAttributeKey.MESSAGE_FORMAT, "gemini")
result = original(*args, **kwargs)
try:
if usage := _parse_usage(result):
span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
except Exception as e:
_logger.warning(
f"Failed to extract token usage for span {span.name}: {e}", exc_info=True
)
# need to convert the response of generate_content for better visualization
outputs = result.to_dict() if hasattr(result, "to_dict") else result
span.set_outputs(outputs)
return result
def _get_keys(dic, keys):
for key in keys:
if key in dic:
return dic[key]
return None
def _log_generativeai_tool_definition(model, span):
"""
This method extract tool definition from generativeai tool type.
"""
# when tools are not passed
if not getattr(model, "_tools", None):
return
try:
set_span_chat_tools(
span,
[
convert_gemini_func_to_mlflow_chat_tool(func)
for func in model._tools.to_proto()[0].function_declarations
],
)
except Exception as e:
_logger.warning(f"Failed to set tool definitions for {span}. Error: {e}")
def _log_genai_tool_definition(model, inputs, span):
"""
This method extract tool definition from genai tool type.
"""
config = inputs.get("config")
tools = getattr(config, "tools", None)
if not tools:
return
# Here, we use an internal function of gemini library to convert callable to Tool schema to
# avoid having the same logic on mlflow side and there is no public attribute for Tool schema.
# https://github.com/googleapis/python-genai/blob/01b15e32d3823a58d25534bb6eea93f30bf82219/google/genai/_transformers.py#L662
tools = genai._transformers.t_tools(model._api_client, tools)
try:
set_span_chat_tools(
span,
[
convert_gemini_func_to_mlflow_chat_tool(function_declaration)
for tool in tools
for function_declaration in tool.function_declarations
],
)
except Exception as e:
_logger.warning(f"Failed to set tool definitions for {span}. Error: {e}")
def _get_span_type(task_name: str) -> str:
span_type_mapping = {
"generate_content": SpanType.LLM,
"_generate_content": SpanType.LLM,
"send_message": SpanType.CHAT_MODEL,
"count_tokens": SpanType.LLM,
"embed_content": SpanType.EMBEDDING,
}
return span_type_mapping.get(task_name, SpanType.UNKNOWN)
def _construct_full_inputs(func, *args, **kwargs):
signature = inspect.signature(func)
# this method does not create copy. So values should not be mutated directly
arguments = signature.bind_partial(*args, **kwargs).arguments
if "self" in arguments:
arguments.pop("self")
return arguments
def _parse_usage(output):
usage = None
if hasattr(output, "usage_metadata"):
usage = output.usage_metadata
elif isinstance(output, dict):
usage = output.get("usage_metadata")
else:
return None
usage_dict = {}
if (prompt_tokens := usage.prompt_token_count) is not None:
usage_dict[TokenUsageKey.INPUT_TOKENS] = prompt_tokens
if (candidate_tokens := usage.candidates_token_count) is not None:
usage_dict[TokenUsageKey.OUTPUT_TOKENS] = candidate_tokens
if (total_tokens := usage.total_token_count) is not None:
usage_dict[TokenUsageKey.TOTAL_TOKENS] = total_tokens
return usage_dict or None
| TracingSession |
python | pypa__setuptools | setuptools/tests/test_build_meta.py | {
"start": 6328,
"end": 31732
} | class ____:
backend_name = 'setuptools.build_meta'
def get_build_backend(self):
return BuildBackend(backend_name=self.backend_name)
@pytest.fixture(params=defns)
def build_backend(self, tmpdir, request):
path.build(request.param, prefix=str(tmpdir))
with tmpdir.as_cwd():
yield self.get_build_backend()
def test_get_requires_for_build_wheel(self, build_backend):
actual = build_backend.get_requires_for_build_wheel()
expected = ['six']
assert sorted(actual) == sorted(expected)
def test_get_requires_for_build_sdist(self, build_backend):
actual = build_backend.get_requires_for_build_sdist()
expected = ['six']
assert sorted(actual) == sorted(expected)
def test_build_wheel(self, build_backend):
dist_dir = os.path.abspath('pip-wheel')
os.makedirs(dist_dir)
wheel_name = build_backend.build_wheel(dist_dir)
wheel_file = os.path.join(dist_dir, wheel_name)
assert os.path.isfile(wheel_file)
# Temporary files should be removed
assert not os.path.isfile('world.py')
with ZipFile(wheel_file) as zipfile:
wheel_contents = set(zipfile.namelist())
# Each one of the examples have a single module
# that should be included in the distribution
python_scripts = (f for f in wheel_contents if f.endswith('.py'))
modules = [f for f in python_scripts if not f.endswith('setup.py')]
assert len(modules) == 1
@pytest.mark.parametrize('build_type', ('wheel', 'sdist'))
def test_build_with_existing_file_present(self, build_type, tmpdir_cwd):
# Building a sdist/wheel should still succeed if there's
# already a sdist/wheel in the destination directory.
files = {
'setup.py': "from setuptools import setup\nsetup()",
'VERSION': "0.0.1",
'setup.cfg': DALS(
"""
[metadata]
name = foo
version = file: VERSION
"""
),
'pyproject.toml': DALS(
"""
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
"""
),
}
path.build(files)
dist_dir = os.path.abspath('preexisting-' + build_type)
build_backend = self.get_build_backend()
build_method = getattr(build_backend, 'build_' + build_type)
# Build a first sdist/wheel.
# Note: this also check the destination directory is
# successfully created if it does not exist already.
first_result = build_method(dist_dir)
# Change version.
with open("VERSION", "wt", encoding="utf-8") as version_file:
version_file.write("0.0.2")
# Build a *second* sdist/wheel.
second_result = build_method(dist_dir)
assert os.path.isfile(os.path.join(dist_dir, first_result))
assert first_result != second_result
# And if rebuilding the exact same sdist/wheel?
open(os.path.join(dist_dir, second_result), 'wb').close()
third_result = build_method(dist_dir)
assert third_result == second_result
assert os.path.getsize(os.path.join(dist_dir, third_result)) > 0
@pytest.mark.parametrize("setup_script", [None, SETUP_SCRIPT_STUB])
def test_build_with_pyproject_config(self, tmpdir, setup_script):
files = {
'pyproject.toml': DALS(
"""
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "foo"
license = {text = "MIT"}
description = "This is a Python package"
dynamic = ["version", "readme"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers"
]
urls = {Homepage = "http://github.com"}
dependencies = [
"appdirs",
]
[project.optional-dependencies]
all = [
"tomli>=1",
"pyscaffold>=4,<5",
'importlib; python_version == "2.6"',
]
[project.scripts]
foo = "foo.cli:main"
[tool.setuptools]
zip-safe = false
package-dir = {"" = "src"}
packages = {find = {where = ["src"]}}
license-files = ["LICENSE*"]
[tool.setuptools.dynamic]
version = {attr = "foo.__version__"}
readme = {file = "README.rst"}
[tool.distutils.sdist]
formats = "gztar"
"""
),
"MANIFEST.in": DALS(
"""
global-include *.py *.txt
global-exclude *.py[cod]
"""
),
"README.rst": "This is a ``README``",
"LICENSE.txt": "---- placeholder MIT license ----",
"src": {
"foo": {
"__init__.py": "__version__ = '0.1'",
"__init__.pyi": "__version__: str",
"cli.py": "def main(): print('hello world')",
"data.txt": "def main(): print('hello world')",
"py.typed": "",
}
},
}
if setup_script:
files["setup.py"] = setup_script
build_backend = self.get_build_backend()
with tmpdir.as_cwd():
path.build(files)
msgs = [
"'tool.setuptools.license-files' is deprecated in favor of 'project.license-files'",
"`project.license` as a TOML table is deprecated",
]
with warnings.catch_warnings():
for msg in msgs:
warnings.filterwarnings("ignore", msg, SetuptoolsDeprecationWarning)
sdist_path = build_backend.build_sdist("temp")
wheel_file = build_backend.build_wheel("temp")
with tarfile.open(os.path.join(tmpdir, "temp", sdist_path)) as tar:
sdist_contents = set(tar.getnames())
with ZipFile(os.path.join(tmpdir, "temp", wheel_file)) as zipfile:
wheel_contents = set(zipfile.namelist())
metadata = str(zipfile.read("foo-0.1.dist-info/METADATA"), "utf-8")
license = str(
zipfile.read("foo-0.1.dist-info/licenses/LICENSE.txt"), "utf-8"
)
epoints = str(zipfile.read("foo-0.1.dist-info/entry_points.txt"), "utf-8")
assert sdist_contents - {"foo-0.1/setup.py"} == {
'foo-0.1',
'foo-0.1/LICENSE.txt',
'foo-0.1/MANIFEST.in',
'foo-0.1/PKG-INFO',
'foo-0.1/README.rst',
'foo-0.1/pyproject.toml',
'foo-0.1/setup.cfg',
'foo-0.1/src',
'foo-0.1/src/foo',
'foo-0.1/src/foo/__init__.py',
'foo-0.1/src/foo/__init__.pyi',
'foo-0.1/src/foo/cli.py',
'foo-0.1/src/foo/data.txt',
'foo-0.1/src/foo/py.typed',
'foo-0.1/src/foo.egg-info',
'foo-0.1/src/foo.egg-info/PKG-INFO',
'foo-0.1/src/foo.egg-info/SOURCES.txt',
'foo-0.1/src/foo.egg-info/dependency_links.txt',
'foo-0.1/src/foo.egg-info/entry_points.txt',
'foo-0.1/src/foo.egg-info/requires.txt',
'foo-0.1/src/foo.egg-info/top_level.txt',
'foo-0.1/src/foo.egg-info/not-zip-safe',
}
assert wheel_contents == {
"foo/__init__.py",
"foo/__init__.pyi", # include type information by default
"foo/cli.py",
"foo/data.txt", # include_package_data defaults to True
"foo/py.typed", # include type information by default
"foo-0.1.dist-info/licenses/LICENSE.txt",
"foo-0.1.dist-info/METADATA",
"foo-0.1.dist-info/WHEEL",
"foo-0.1.dist-info/entry_points.txt",
"foo-0.1.dist-info/top_level.txt",
"foo-0.1.dist-info/RECORD",
}
assert license == "---- placeholder MIT license ----"
for line in (
"Summary: This is a Python package",
"License: MIT",
"License-File: LICENSE.txt",
"Classifier: Intended Audience :: Developers",
"Requires-Dist: appdirs",
"Requires-Dist: " + str(Requirement('tomli>=1 ; extra == "all"')),
"Requires-Dist: "
+ str(Requirement('importlib; python_version=="2.6" and extra =="all"')),
):
assert line in metadata, (line, metadata)
assert metadata.strip().endswith("This is a ``README``")
assert epoints.strip() == "[console_scripts]\nfoo = foo.cli:main"
def test_static_metadata_in_pyproject_config(self, tmpdir):
# Make sure static metadata in pyproject.toml is not overwritten by setup.py
# as required by PEP 621
files = {
'pyproject.toml': DALS(
"""
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "foo"
description = "This is a Python package"
version = "42"
dependencies = ["six"]
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='bar',
version='13',
)
"""
),
}
build_backend = self.get_build_backend()
with tmpdir.as_cwd():
path.build(files)
sdist_path = build_backend.build_sdist("temp")
wheel_file = build_backend.build_wheel("temp")
assert (tmpdir / "temp/foo-42.tar.gz").exists()
assert (tmpdir / "temp/foo-42-py3-none-any.whl").exists()
assert not (tmpdir / "temp/bar-13.tar.gz").exists()
assert not (tmpdir / "temp/bar-42.tar.gz").exists()
assert not (tmpdir / "temp/foo-13.tar.gz").exists()
assert not (tmpdir / "temp/bar-13-py3-none-any.whl").exists()
assert not (tmpdir / "temp/bar-42-py3-none-any.whl").exists()
assert not (tmpdir / "temp/foo-13-py3-none-any.whl").exists()
with tarfile.open(os.path.join(tmpdir, "temp", sdist_path)) as tar:
pkg_info = str(tar.extractfile('foo-42/PKG-INFO').read(), "utf-8")
members = tar.getnames()
assert "bar-13/PKG-INFO" not in members
with ZipFile(os.path.join(tmpdir, "temp", wheel_file)) as zipfile:
metadata = str(zipfile.read("foo-42.dist-info/METADATA"), "utf-8")
members = zipfile.namelist()
assert "bar-13.dist-info/METADATA" not in members
for file in pkg_info, metadata:
for line in ("Name: foo", "Version: 42"):
assert line in file
for line in ("Name: bar", "Version: 13"):
assert line not in file
def test_build_sdist(self, build_backend):
dist_dir = os.path.abspath('pip-sdist')
os.makedirs(dist_dir)
sdist_name = build_backend.build_sdist(dist_dir)
assert os.path.isfile(os.path.join(dist_dir, sdist_name))
def test_prepare_metadata_for_build_wheel(self, build_backend):
dist_dir = os.path.abspath('pip-dist-info')
os.makedirs(dist_dir)
dist_info = build_backend.prepare_metadata_for_build_wheel(dist_dir)
assert os.path.isfile(os.path.join(dist_dir, dist_info, 'METADATA'))
def test_prepare_metadata_inplace(self, build_backend):
"""
Some users might pass metadata_directory pre-populated with `.tox` or `.venv`.
See issue #3523.
"""
for pre_existing in [
".tox/python/lib/python3.10/site-packages/attrs-22.1.0.dist-info",
".tox/python/lib/python3.10/site-packages/autocommand-2.2.1.dist-info",
".nox/python/lib/python3.10/site-packages/build-0.8.0.dist-info",
".venv/python3.10/site-packages/click-8.1.3.dist-info",
"venv/python3.10/site-packages/distlib-0.3.5.dist-info",
"env/python3.10/site-packages/docutils-0.19.dist-info",
]:
os.makedirs(pre_existing, exist_ok=True)
dist_info = build_backend.prepare_metadata_for_build_wheel(".")
assert os.path.isfile(os.path.join(dist_info, 'METADATA'))
def test_build_sdist_explicit_dist(self, build_backend):
# explicitly specifying the dist folder should work
# the folder sdist_directory and the ``--dist-dir`` can be the same
dist_dir = os.path.abspath('dist')
sdist_name = build_backend.build_sdist(dist_dir)
assert os.path.isfile(os.path.join(dist_dir, sdist_name))
def test_build_sdist_version_change(self, build_backend):
sdist_into_directory = os.path.abspath("out_sdist")
os.makedirs(sdist_into_directory)
sdist_name = build_backend.build_sdist(sdist_into_directory)
assert os.path.isfile(os.path.join(sdist_into_directory, sdist_name))
# if the setup.py changes subsequent call of the build meta
# should still succeed, given the
# sdist_directory the frontend specifies is empty
setup_loc = os.path.abspath("setup.py")
if not os.path.exists(setup_loc):
setup_loc = os.path.abspath("setup.cfg")
with open(setup_loc, 'rt', encoding="utf-8") as file_handler:
content = file_handler.read()
with open(setup_loc, 'wt', encoding="utf-8") as file_handler:
file_handler.write(content.replace("version='0.0.0'", "version='0.0.1'"))
shutil.rmtree(sdist_into_directory)
os.makedirs(sdist_into_directory)
sdist_name = build_backend.build_sdist("out_sdist")
assert os.path.isfile(os.path.join(os.path.abspath("out_sdist"), sdist_name))
def test_build_sdist_pyproject_toml_exists(self, tmpdir_cwd):
files = {
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello']
)"""
),
'hello.py': '',
'pyproject.toml': DALS(
"""
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
"""
),
}
path.build(files)
build_backend = self.get_build_backend()
targz_path = build_backend.build_sdist("temp")
with tarfile.open(os.path.join("temp", targz_path)) as tar:
assert any('pyproject.toml' in name for name in tar.getnames())
def test_build_sdist_setup_py_exists(self, tmpdir_cwd):
# If build_sdist is called from a script other than setup.py,
# ensure setup.py is included
path.build(defns[0])
build_backend = self.get_build_backend()
targz_path = build_backend.build_sdist("temp")
with tarfile.open(os.path.join("temp", targz_path)) as tar:
assert any('setup.py' in name for name in tar.getnames())
def test_build_sdist_setup_py_manifest_excluded(self, tmpdir_cwd):
# Ensure that MANIFEST.in can exclude setup.py
files = {
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello']
)"""
),
'hello.py': '',
'MANIFEST.in': DALS(
"""
exclude setup.py
"""
),
}
path.build(files)
build_backend = self.get_build_backend()
targz_path = build_backend.build_sdist("temp")
with tarfile.open(os.path.join("temp", targz_path)) as tar:
assert not any('setup.py' in name for name in tar.getnames())
def test_build_sdist_builds_targz_even_if_zip_indicated(self, tmpdir_cwd):
files = {
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello']
)"""
),
'hello.py': '',
'setup.cfg': DALS(
"""
[sdist]
formats=zip
"""
),
}
path.build(files)
build_backend = self.get_build_backend()
build_backend.build_sdist("temp")
_relative_path_import_files = {
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='foo',
version=__import__('hello').__version__,
py_modules=['hello']
)"""
),
'hello.py': '__version__ = "0.0.0"',
'setup.cfg': DALS(
"""
[sdist]
formats=zip
"""
),
}
def test_build_sdist_relative_path_import(self, tmpdir_cwd):
path.build(self._relative_path_import_files)
build_backend = self.get_build_backend()
with pytest.raises(ImportError, match="^No module named 'hello'$"):
build_backend.build_sdist("temp")
_simple_pyproject_example = {
"pyproject.toml": DALS(
"""
[project]
name = "proj"
version = "42"
"""
),
"src": {"proj": {"__init__.py": ""}},
}
def _assert_link_tree(self, parent_dir):
"""All files in the directory should be either links or hard links"""
files = list(Path(parent_dir).glob("**/*"))
assert files # Should not be empty
for file in files:
assert file.is_symlink() or os.stat(file).st_nlink > 0
def test_editable_without_config_settings(self, tmpdir_cwd):
"""
Sanity check to ensure tests with --mode=strict are different from the ones
without --mode.
--mode=strict should create a local directory with a package tree.
The directory should not get created otherwise.
"""
path.build(self._simple_pyproject_example)
build_backend = self.get_build_backend()
assert not Path("build").exists()
build_backend.build_editable("temp")
assert not Path("build").exists()
def test_build_wheel_inplace(self, tmpdir_cwd):
config_settings = {"--build-option": ["build_ext", "--inplace"]}
path.build(self._simple_pyproject_example)
build_backend = self.get_build_backend()
assert not Path("build").exists()
Path("build").mkdir()
build_backend.prepare_metadata_for_build_wheel("build", config_settings)
build_backend.build_wheel("build", config_settings)
assert Path("build/proj-42-py3-none-any.whl").exists()
@pytest.mark.parametrize("config_settings", [{"editable-mode": "strict"}])
def test_editable_with_config_settings(self, tmpdir_cwd, config_settings):
path.build({**self._simple_pyproject_example, '_meta': {}})
assert not Path("build").exists()
build_backend = self.get_build_backend()
build_backend.prepare_metadata_for_build_editable("_meta", config_settings)
build_backend.build_editable("temp", config_settings, "_meta")
self._assert_link_tree(next(Path("build").glob("__editable__.*")))
@pytest.mark.parametrize(
("setup_literal", "requirements"),
[
("'foo'", ['foo']),
("['foo']", ['foo']),
(r"'foo\n'", ['foo']),
(r"'foo\n\n'", ['foo']),
("['foo', 'bar']", ['foo', 'bar']),
(r"'# Has a comment line\nfoo'", ['foo']),
(r"'foo # Has an inline comment'", ['foo']),
(r"'foo \\\n >=3.0'", ['foo>=3.0']),
(r"'foo\nbar'", ['foo', 'bar']),
(r"'foo\nbar\n'", ['foo', 'bar']),
(r"['foo\n', 'bar\n']", ['foo', 'bar']),
],
)
@pytest.mark.parametrize('use_wheel', [True, False])
def test_setup_requires(self, setup_literal, requirements, use_wheel, tmpdir_cwd):
files = {
'setup.py': DALS(
"""
from setuptools import setup
setup(
name="qux",
version="0.0.0",
py_modules=["hello"],
setup_requires={setup_literal},
)
"""
).format(setup_literal=setup_literal),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
}
path.build(files)
build_backend = self.get_build_backend()
if use_wheel:
get_requires = build_backend.get_requires_for_build_wheel
else:
get_requires = build_backend.get_requires_for_build_sdist
# Ensure that the build requirements are properly parsed
expected = sorted(requirements)
actual = get_requires()
assert expected == sorted(actual)
def test_setup_requires_with_auto_discovery(self, tmpdir_cwd):
# Make sure patches introduced to retrieve setup_requires don't accidentally
# activate auto-discovery and cause problems due to the incomplete set of
# attributes passed to MinimalDistribution
files = {
'pyproject.toml': DALS(
"""
[project]
name = "proj"
version = "42"
"""
),
"setup.py": DALS(
"""
__import__('setuptools').setup(
setup_requires=["foo"],
py_modules = ["hello", "world"]
)
"""
),
'hello.py': "'hello'",
'world.py': "'world'",
}
path.build(files)
build_backend = self.get_build_backend()
setup_requires = build_backend.get_requires_for_build_wheel()
assert setup_requires == ["foo"]
def test_dont_install_setup_requires(self, tmpdir_cwd):
files = {
'setup.py': DALS(
"""
from setuptools import setup
setup(
name="qux",
version="0.0.0",
py_modules=["hello"],
setup_requires=["does-not-exist >99"],
)
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
}
path.build(files)
build_backend = self.get_build_backend()
dist_dir = os.path.abspath('pip-dist-info')
os.makedirs(dist_dir)
# does-not-exist can't be satisfied, so if it attempts to install
# setup_requires, it will fail.
build_backend.prepare_metadata_for_build_wheel(dist_dir)
_sys_argv_0_passthrough = {
'setup.py': DALS(
"""
import os
import sys
__import__('setuptools').setup(
name='foo',
version='0.0.0',
)
sys_argv = os.path.abspath(sys.argv[0])
file_path = os.path.abspath('setup.py')
assert sys_argv == file_path
"""
)
}
def test_sys_argv_passthrough(self, tmpdir_cwd):
path.build(self._sys_argv_0_passthrough)
build_backend = self.get_build_backend()
with pytest.raises(AssertionError):
build_backend.build_sdist("temp")
_setup_py_file_abspath = {
'setup.py': DALS(
"""
import os
assert os.path.isabs(__file__)
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello'],
setup_requires=['six'],
)
"""
)
}
def test_setup_py_file_abspath(self, tmpdir_cwd):
path.build(self._setup_py_file_abspath)
build_backend = self.get_build_backend()
build_backend.build_sdist("temp")
@pytest.mark.parametrize('build_hook', ('build_sdist', 'build_wheel'))
def test_build_with_empty_setuppy(self, build_backend, build_hook):
files = {'setup.py': ''}
path.build(files)
msg = re.escape('No distribution was found.')
with pytest.raises(ValueError, match=msg):
getattr(build_backend, build_hook)("temp")
| TestBuildMetaBackend |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_stateful.py | {
"start": 2846,
"end": 3879
} | class ____(RuleBasedStateMachine):
@rule(d=data())
def action(self, d):
if current_build_context().is_final:
d.draw(binary(min_size=1, max_size=1))
else:
buffer = binary(min_size=1024, max_size=1024)
assert 0 not in buffer
def test_flaky_draw_less_raises_flaky():
with raises(Flaky):
FlakyDrawLessMachine.TestCase().runTest()
def test_result_is_added_to_target():
class TargetStateMachine(RuleBasedStateMachine):
nodes = Bundle("nodes")
@rule(target=nodes, source=lists(nodes))
def bunch(self, source):
assert len(source) == 0
return source
test_class = TargetStateMachine.TestCase
try:
test_class().runTest()
raise RuntimeError("Expected an assertion error")
except AssertionError as err:
notes = err.__notes__
regularized_notes = [re.sub(r"[0-9]+", "i", note) for note in notes]
assert "state.bunch(source=[nodes_i])" in regularized_notes
| FlakyDrawLessMachine |
python | tensorflow__tensorflow | tensorflow/python/distribute/parameter_server_strategy.py | {
"start": 7341,
"end": 28342
} | class ____(distribute_lib.StrategyExtendedV1):
"""Implementation of ParameterServerStrategy and CentralStorageStrategy."""
def __init__(self,
container_strategy,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
super(ParameterServerStrategyExtended, self).__init__(container_strategy)
self._initialize_strategy(
cluster_resolver=cluster_resolver,
compute_devices=compute_devices,
parameter_device=parameter_device)
# We typically don't need to do all-reduce in this strategy.
self._cross_device_ops = (
cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))
def _initialize_strategy(self,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
if cluster_resolver and cluster_resolver.cluster_spec():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(
compute_devices, parameter_device, cluster_resolver=cluster_resolver)
def _initialize_multi_worker(self, cluster_resolver):
"""Initialize devices for multiple workers.
It creates variable devices and compute devices. Variables and operations
will be assigned to them respectively. We have one compute device per
replica. The variable device is a device function or device string. The
default variable device assigns variables to parameter servers in a
round-robin fashion.
Args:
cluster_resolver: a descendant of `ClusterResolver` object.
Raises:
ValueError: if the cluster doesn't have ps jobs.
"""
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(
cluster_resolver, tfconfig_cluster_resolver.TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method.
self._num_gpus_per_worker = num_gpus
cluster_spec = cluster_resolver.cluster_spec()
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if not task_type or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
assert cluster_spec.as_dict()
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._input_host_device = numpy_dataset.SingleDevice(self._worker_device)
# Define compute devices which is a list of device strings and one for each
# replica. When there are GPUs, replicate operations on these GPUs.
# Otherwise, place operations on CPU.
if num_gpus > 0:
compute_devices = tuple(
"%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
compute_devices = (self._worker_device,)
self._compute_devices = [
device_util.canonicalize(d) for d in compute_devices]
# In distributed mode, place variables on ps jobs in a round-robin fashion.
# Note that devices returned from `replica_device_setter` are not
# canonical and therefore we don't canonicalize all variable devices to
# make them consistent.
# TODO(yuefengz): support passing a strategy object to control variable
# assignment.
# TODO(yuefengz): merge the logic of replica_device_setter into this
# class.
num_ps_replicas = len(cluster_spec.as_dict().get("ps", []))
if num_ps_replicas == 0:
raise ValueError("The cluster spec needs to have `ps` jobs.")
self._variable_device = device_setter.replica_device_setter(
ps_tasks=num_ps_replicas,
worker_device=self._worker_device,
merge_devices=True,
cluster=cluster_spec)
# The `_parameter_devices` is needed for the `parameter_devices` property
# and is a list of all variable devices. Here parameter devices are all
# tasks of the "ps" job.
self._parameter_devices = tuple(map("/job:ps/task:{}".format,
range(num_ps_replicas)))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = self._worker_device
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker ParameterServerStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_ps_replicas = %r, is_chief = %r, compute_devices = %r, "
"variable_device = %r", cluster_spec.as_dict(), task_type, task_id,
num_ps_replicas, self._is_chief, self._compute_devices,
self._variable_device)
# TODO(yuefengz): get rid of cluster_resolver argument when contrib's
# version no longer depends on this class.
def _initialize_local(self,
compute_devices,
parameter_device,
cluster_resolver=None):
"""Initialize local devices for training."""
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._input_host_device = numpy_dataset.SingleDevice(self._worker_device)
if compute_devices is None:
if not cluster_resolver:
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method which is used by the
# contrib version.
self._num_gpus_per_worker = num_gpus
compute_devices = device_util.local_devices_from_num_gpus(num_gpus)
compute_devices = [device_util.canonicalize(d) for d in compute_devices]
if parameter_device is None:
# If there is only one GPU, put everything on that GPU. Otherwise, place
# variables on CPU.
if len(compute_devices) == 1:
parameter_device = compute_devices[0]
else:
parameter_device = _LOCAL_CPU
self._variable_device = parameter_device
self._compute_devices = compute_devices
self._parameter_devices = (parameter_device,)
self._is_chief = True
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info(
"ParameterServerStrategy (CentralStorageStrategy if you are using a "
"single machine) with compute_devices = %r, variable_device = %r",
compute_devices, self._variable_device)
def _input_workers_with_options(self, options=None):
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers(
[(self._worker_device, self._compute_devices)])
else:
return input_lib.InputWorkers(
[(self._worker_device,
(self._worker_device,) * len(self._compute_devices))])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _experimental_distribute_dataset(self, dataset, options):
return input_util.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
options=options)
def _make_dataset_iterator(self, dataset):
return input_lib_v1.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the dataset to each local GPU."""
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._input_host_device, session)
def _distribute_datasets_from_function(self, dataset_fn, options):
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options), [input_context],
self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(
value_fn(distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if not cross_device_ops_lib.check_destinations(destinations):
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = self._compute_devices
return self._cross_device_ops.broadcast(tensor, destinations)
def _allow_variable_partition(self):
return not context.executing_eagerly()
def _create_var_creator(self, next_creator, **kwargs):
if self._num_replicas_in_sync > 1:
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
def var_creator(**kwargs):
"""Create an AggregatingVariable and fix up collections."""
# Record what collections this variable should be added to.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Create and wrap the variable.
v = next_creator(**kwargs)
wrapped = ps_values.AggregatingVariable(self._container_strategy(), v,
aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the contained
# variable to the TRAINABLE_VARIABLES collection, so we manually
# remove it and replace with the wrapper. We can't set "trainable"
# to False for next_creator() since that causes functions like
# implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
if v in l:
l.remove(v)
g.add_to_collections(collections, wrapped)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)
return wrapped
return var_creator
else:
return next_creator
# TODO(yuefengz): Not all ops in device_setter.STANDARD_PS_OPS will go through
# this creator, such as "MutableHashTable".
def _create_variable(self, next_creator, **kwargs):
var_creator = self._create_var_creator(next_creator, **kwargs)
if "colocate_with" in kwargs:
colocate_with = kwargs["colocate_with"]
if isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return var_creator(**kwargs)
with ops.device(None):
with ops.colocate_with(colocate_with):
return var_creator(**kwargs)
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._variable_device):
return var_creator(**kwargs)
def _call_for_each_replica(self, fn, args, kwargs):
return mirrored_run.call_for_each_replica(self._container_strategy(), fn,
args, kwargs)
def _verify_destinations_not_different_worker(self, destinations):
if not self._cluster_spec:
return
if destinations is None:
return
for d in cross_device_ops_lib.get_devices_from(destinations):
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job == self._task_type and d_spec.task != self._task_id:
raise ValueError(
"Cannot reduce to another worker: %r, current worker is %r" %
(d, self._worker_device))
def _gather_to_implementation(self, value, destinations, axis,
options):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
return value
return self._cross_device_ops._gather( # pylint: disable=protected-access
value,
destinations=destinations,
axis=axis,
options=options)
def _reduce_to(self, reduce_op, value, destinations, options):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
# pylint: disable=protected-access
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
return self._cross_device_ops.reduce(
reduce_op, value, destinations=destinations, options=options)
def _batch_reduce_to(self, reduce_op, value_destination_pairs, options):
for _, destinations in value_destination_pairs:
self._verify_destinations_not_different_worker(destinations)
return self._cross_device_ops.batch_reduce(reduce_op,
value_destination_pairs, options)
def _select_single_value(self, structured):
"""Select any single value in `structured`."""
def _select_fn(x): # pylint: disable=g-missing-docstring
if isinstance(x, values.Mirrored) or isinstance(x, values.PerReplica):
return x._primary # pylint: disable=protected-access
else:
return x
return nest.map_structure(_select_fn, structured)
def _update(self, var, fn, args, kwargs, group):
if isinstance(var, ps_values.AggregatingVariable):
var = var.get()
if not resource_variable_ops.is_resource_variable(var):
raise ValueError(
"You can not update `var` %r. It must be a Variable." % var)
with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):
result = fn(var, *self._select_single_value(args),
**self._select_single_value(kwargs))
if group:
return result
else:
return nest.map_structure(self._local_results, result)
# TODO(yuefengz): does it need to call _select_single_value?
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
with ops.device(
colocate_with.device), distribute_lib.UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def value_container(self, val):
if (hasattr(val, "_aggregating_container") and
not isinstance(val, ps_values.AggregatingVariable)):
wrapper = val._aggregating_container() # pylint: disable=protected-access
if wrapper is not None:
return wrapper
return val
def read_var(self, var):
# No need to distinguish between normal variables and replica-local
# variables.
return array_ops.identity(var)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class with `cluster_spec`.
The strategy object will be re-initialized if `cluster_spec` is passed to
`configure` but was not passed when instantiating the strategy.
Args:
session_config: Session config object.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker})
self._initialize_multi_worker(cluster_resolver)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
if not self._cluster_spec:
updated_config.isolate_session_state = True
return updated_config
updated_config.isolate_session_state = False
assert self._task_type
assert self._task_id is not None
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
if self._task_type in ["chief", "worker"]:
updated_config.device_filters.extend(
["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"])
elif self._task_type == "evaluator":
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return self._cluster_spec is not None
@property
def _num_replicas_in_sync(self):
return len(self._compute_devices)
@property
def worker_devices(self):
return self._compute_devices
@property
def worker_devices_by_replica(self):
return [[d] for d in self._compute_devices]
@property
def parameter_devices(self):
return self._parameter_devices
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
@property
def experimental_between_graph(self):
# TODO(yuefengz): Should this return False in the local case?
return True
@property
def experimental_should_init(self):
return self._is_chief
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
def _get_replica_id_in_sync_group(self, replica_id):
return replica_id
| ParameterServerStrategyExtended |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/dml.py | {
"start": 7560,
"end": 9560
} | class ____(SyntaxExtension, ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering: Optional[List[str]] = None
update: Dict[str, ColumnElement[Any]]
stringify_dialect = "mysql"
_traverse_internals = [
("_parameter_ordering", InternalTraversal.dp_string_list),
("update", InternalTraversal.dp_dml_values),
]
def __init__(
self, inserted_alias: NamedFromClause, update: _UpdateArg
) -> None:
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
# Update._proces_colparams(), however we don't look for a special flag
# in this case since we are not disambiguating from other use cases as
# we are in Update.values().
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if isinstance(update, dict):
if not update:
raise ValueError(
"update parameter dictionary must not be empty"
)
elif isinstance(update, ColumnCollection):
update = dict(update)
else:
raise ValueError(
"update parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update = {
k: coercions.expect(
roles.ExpressionElementRole, v, type_=NULLTYPE, is_crud=True
)
for k, v in update.items()
}
def apply_to_insert(self, insert_stmt: StandardInsert) -> None:
insert_stmt.apply_syntax_extension_point(
self.append_replacing_same_type, "post_values"
)
_UpdateArg = Union[
Mapping[Any, Any], List[Tuple[str, Any]], ColumnCollection[Any, Any]
]
| OnDuplicateClause |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 17153,
"end": 18904
} | class ____(RenderedComponentContent):
"""RenderedStringTemplateContent is RenderedComponentContent that represents a templated string.
Args:
string_template: A dictionary containing:
template: The string to perform substitution on. Variables are denoted with a preceeding $.
params: A dictionary with keys that match variable names and values which will be substituted.
styling: A dictionary containing styling information.
styling: A dictionary containing styling information.
content_block_type: The type of content block.
""" # noqa: E501 # FIXME CoP
def __init__(
self,
string_template: dict,
styling: Optional[dict] = None,
content_block_type: str = "string_template",
) -> None:
super().__init__(content_block_type=content_block_type, styling=styling)
self.string_template = string_template
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this RenderedStringTemplateContent.
Returns:
A JSON-serializable dict representation of this RenderedStringTemplateContent.
"""
d = super().to_json_dict()
d["string_template"] = self.string_template
return d
@override
def __str__(self):
string = pTemplate(self.string_template["template"]).safe_substitute(
self.string_template["params"]
)
return string
@override
def __eq__(self, other):
return str(self) == str(other)
@override
def __hash__(self) -> int:
"""Overrides the default implementation"""
return hash(str(self))
| RenderedStringTemplateContent |
python | scipy__scipy | scipy/interpolate/_polyint.py | {
"start": 549,
"end": 4598
} | class ____:
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via _set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array_like
Point or points at which to evaluate the interpolant.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of `x`.
Notes
-----
Input values `x` must be convertible to `float` values like `int`
or `float`.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = _asarray_validated(x, check_finite=False, as_inexact=True)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to an N-D array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.moveaxis(np.asarray(yi), self._y_axis, 0)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = (f"{self._y_extra_shape[-self._y_axis:]!r} + (N,) + "
f"{self._y_extra_shape[:-self._y_axis]!r}")
raise ValueError(f"Data must be of shape {ok_shape}")
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis] + yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex128
else:
if not union or self.dtype != np.complex128:
self.dtype = np.float64
| _Interpolator1D |
python | sympy__sympy | sympy/plotting/pygletplot/plot_modes.py | {
"start": 1494,
"end": 1998
} | class ____(PlotCurve):
i_vars, d_vars = 't', 'xy'
intervals = [[0, 2*pi, 100]]
aliases = ['parametric']
is_default = True
def _get_sympy_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
@float_vec3
def e(_t):
return (fx.subs(t, _t), fy.subs(t, _t), 0.0)
return e
def _get_lambda_evaluator(self):
fx, fy = self.d_vars
t = self.t_interval.v
return lambdify([t], [fx, fy, 0.0])
| ParametricCurve2D |
python | openai__openai-python | src/openai/types/responses/response_function_shell_tool_call_output.py | {
"start": 521,
"end": 815
} | class ____(BaseModel):
exit_code: int
"""Exit code from the shell process."""
type: Literal["exit"]
"""The outcome type. Always `exit`."""
OutputOutcome: TypeAlias = Annotated[Union[OutputOutcomeTimeout, OutputOutcomeExit], PropertyInfo(discriminator="type")]
| OutputOutcomeExit |
python | walkccc__LeetCode | solutions/2415. Reverse Odd Levels of Binary Tree/2415.py | {
"start": 0,
"end": 433
} | class ____:
def reverseOddLevels(self, root: TreeNode | None) -> TreeNode | None:
def dfs(left: TreeNode | None, right: TreeNode | None, isOddLevel: bool) -> None:
if not left:
return
if isOddLevel:
left.val, right.val = right.val, left.val
dfs(left.left, right.right, not isOddLevel)
dfs(left.right, right.left, not isOddLevel)
dfs(root.left, root.right, True)
return root
| Solution |
python | kamyu104__LeetCode-Solutions | Python/degree-of-an-array.py | {
"start": 50,
"end": 538
} | class ____(object):
def findShortestSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
counts = collections.Counter(nums)
left, right = {}, {}
for i, num in enumerate(nums):
left.setdefault(num, i)
right[num] = i
degree = max(counts.values())
return min(right[num]-left[num]+1 \
for num in counts.keys() \
if counts[num] == degree)
| Solution |
python | great-expectations__great_expectations | great_expectations/render/view/view.py | {
"start": 16411,
"end": 16500
} | class ____(DefaultJinjaPageView):
_template = "index_page.j2"
| DefaultJinjaIndexPageView |
python | doocs__leetcode | solution/1800-1899/1869.Longer Contiguous Segments of Ones than Zeros/Solution.py | {
"start": 0,
"end": 338
} | class ____:
def checkZeroOnes(self, s: str) -> bool:
def f(x: str) -> int:
cnt = mx = 0
for c in s:
if c == x:
cnt += 1
mx = max(mx, cnt)
else:
cnt = 0
return mx
return f("1") > f("0")
| Solution |
python | gevent__gevent | src/gevent/tests/test___config.py | {
"start": 2461,
"end": 3015
} | class ____(unittest.TestCase):
def test_validate_bool(self):
self.assertTrue(_config.validate_bool('on'))
self.assertTrue(_config.validate_bool('1'))
self.assertFalse(_config.validate_bool('off'))
self.assertFalse(_config.validate_bool('0'))
self.assertFalse(_config.validate_bool(''))
with self.assertRaises(ValueError):
_config.validate_bool(' hmm ')
def test_validate_invalid(self):
with self.assertRaises(ValueError):
_config.validate_invalid(self)
| TestFunctions |
python | pytorch__pytorch | torch/_inductor/fx_passes/graph_view.py | {
"start": 854,
"end": 7447
} | class ____:
"""
A hierarchical class for organizing and managing torch.fx nodes by their module stack.
This class provides a tree-like structure where each node in the hierarchy corresponds
to a module or submodule in a traced FX graph. Each `GraphView` instance can hold a list
of FX nodes (`self.data`) belonging to that module scope, maintain a unique set of nodes
(`self.unique_nodes`), and manage its child containers (`self.children`).
Attributes:
name (str): The name of the module or container scope.
klass (type[Any]): The class type associated with this module/container.
data (list[fx.Node]): A list of FX graph nodes belonging to this module.
unique_nodes (OrderedSet[fx.Node]): A deduplicated set of nodes to ensure no duplicates.
children (dict[str, GraphView]): A mapping of child module names to their corresponding GraphView instances.
"""
def __init__(self, name: str, klass: type[Any]) -> None:
self.name: str = name
self.klass: type[Any] = klass
self.data: list[fx.Node] = []
self.unique_nodes: OrderedSet[fx.Node] = OrderedSet()
self.children: dict[str, GraphView] = {}
def add(self, data: fx.Node) -> None:
if data not in self.unique_nodes:
self.data.append(data)
self.unique_nodes.add(data)
def get_child(
self, module_stack: str, klass: Optional[type[Any]] = None
) -> GraphView:
if module_stack not in self.children:
new_stack = GraphView(module_stack, klass or self.klass)
self.children[module_stack] = new_stack
return self.children[module_stack]
def __getitem__(self, name: str) -> GraphView:
return self.children[name]
def __getattr__(self, name: str) -> GraphView:
return self.children[name]
def __repr__(self) -> str:
child_lines: list[str] = []
for name, child in self.children.items():
mod_str = repr(child)
mod_str = _addindent(mod_str, 2)
child_lines.append(f"({name}): {mod_str}")
main_str = f"{self.klass.__name__}("
if child_lines:
main_str += "\n " + "\n ".join(child_lines) + "\n"
main_str += ")"
return main_str
def _clean_stack_name(stack_name: str) -> str:
"""
Clean up FX node's nn_module_stack metadata string to match the module name hierarchies
Example:
Input: "L['self']._modules['layers']['0']._modules['attention']"
Output: "layers.0.attention"
"""
cleaned = re.sub(r"^L\['self'\]\.?", "", stack_name)
parts = re.findall(r"\['([^']+)'\]", cleaned)
return ".".join(parts) if parts else cleaned
def _is_root(stack: str) -> bool:
return stack == ""
def make_graph_view(graph: fx.Graph) -> Optional[GraphView]:
"""
Code from: https://github.com/meta-pytorch/autoparallel/pull/158
Make a graph view from the fx.Graph. This is a tree structure that
represents the module hierarchy of the graph, and enables us to
easily find the nodes that belong to each module, and gives a slightly
easier way of visualize different parts of the graph by extracting
subgraphs that belong to a particular module FQN.
For example, if we have the following model with module hierarchy:
Transformer(
(tok_embeddings): Embedding(128256, 4096)
(layers): ModuleDict(
(0): TransformerBlock(
(attention): Attention(
(wq): Linear(in_features=4096, out_features=4096, bias=False)
(wk): Linear(in_features=4096, out_features=1024, bias=False)
(wv): Linear(in_features=4096, out_features=1024, bias=False)
(wo): Linear(in_features=4096, out_features=4096, bias=False)
(sdpa): ScaledDotProductAttention()
)
(feed_forward): FeedForward(
(w1): Linear(in_features=4096, out_features=14336, bias=False)
(w2): Linear(in_features=14336, out_features=4096, bias=False)
(w3): Linear(in_features=4096, out_features=14336, bias=False)
)
(attention_norm): RMSNorm((4096,), eps=1e-05, elementwise_affine=True)
(ffn_norm): RMSNorm((4096,), eps=1e-05, elementwise_affine=True)
)
)
(norm): RMSNorm((4096,), eps=1e-05, elementwise_affine=True)
(output): Linear(in_features=4096, out_features=128256, bias=False)
)
Then we can get a GraphView for the fx.Graph that enables us to do
graph_view = make_graph_view(graph)
subgraph = get_subgraph_by_path(graph_view, "layers.0")
where subgraph contains all the nodes that belong to this region
"""
nodes: list[fx.Node] = list(graph.nodes)
nodes_by_module_stack_root: GraphView | None = None
for node in nodes:
for module_stack, module_class in _get_module_stack(node):
module_stack = _clean_stack_name(module_stack)
nodes_by_module_stack: GraphView | None = nodes_by_module_stack_root
for name in module_stack.split("."):
if nodes_by_module_stack is None:
nodes_by_module_stack = GraphView(name, module_class)
nodes_by_module_stack_root = nodes_by_module_stack
if _is_root(module_stack):
new_stack: GraphView = nodes_by_module_stack
else:
new_stack = nodes_by_module_stack.get_child(name, module_class)
nodes_by_module_stack = new_stack
nodes_by_module_stack.add(node)
return nodes_by_module_stack_root
def get_subgraph_by_path(
graph_view: GraphView, paths: Union[str, list[str]]
) -> list[fx.Node]:
"""
Get subgraph by path(s).
Args:
graph_view (object): Root graph view object.
paths (str or list of str): Path(s) to subgraph.
Returns:
list[fx.Node]: fx nodes belong to the subgraph
"""
def get_node_by_path(node: GraphView, path: str) -> GraphView:
for p in path.split("."):
if p in node.children:
node = node.children[p]
else:
return GraphView("", object)
return node
if isinstance(paths, list):
nodes = list(
itertools.chain.from_iterable(
get_node_by_path(graph_view, p).data for p in paths
)
)
return nodes
else:
node = get_node_by_path(graph_view, paths)
return node.data
| GraphView |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 189927,
"end": 191637
} | class ____(Operation):
def __init__(self, axis=0, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.numpy.stack(x, axis=self.axis)
def compute_output_spec(self, x):
first_shape = x[0].shape
dtypes_to_resolve = []
for a in x:
if not shape_equal(a.shape, first_shape, axis=[], allow_none=True):
raise ValueError(
"Every value in `x` must have the same shape. But found "
f"element of shape {a.shape}, which is different from the "
f"first element's shape {first_shape}."
)
dtypes_to_resolve.append(getattr(a, "dtype", type(a)))
size_on_axis = len(x)
output_shape = list(first_shape)
if self.axis == -1:
output_shape = output_shape + [size_on_axis]
elif self.axis >= 0:
output_shape.insert(self.axis, size_on_axis)
else:
output_shape.insert(self.axis + 1, size_on_axis)
output_dtype = dtypes.result_type(*dtypes_to_resolve)
return KerasTensor(output_shape, dtype=output_dtype)
@keras_export(["keras.ops.stack", "keras.ops.numpy.stack"])
def stack(x, axis=0):
"""Join a sequence of tensors along a new axis.
The `axis` parameter specifies the index of the new axis in the
dimensions of the result.
Args:
x: A sequence of tensors.
axis: Axis along which to stack. Defaults to `0`.
Returns:
The stacked tensor.
"""
if any_symbolic_tensors((x,)):
return Stack(axis=axis).symbolic_call(x)
return backend.numpy.stack(x, axis=axis)
| Stack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple1.py | {
"start": 289,
"end": 959
} | class ____(Generic[_T, Unpack[_Xs]]):
def __init__(self, *args: Unpack[_Xs]) -> None:
reveal_type(args, expected_text="tuple[*_Xs@ClassA]")
# This should generate two errors.
def func2(self) -> Union[_Xs]: ...
def func3(self) -> tuple[Unpack[_Xs]]: ...
# This should generate an error.
def func4(self) -> tuple[_Xs]: ...
def func5(self) -> "ClassA[int, str, Unpack[_Xs]]": ...
# This should be an error because list doesn't accept a variadic TypeVar.
x: list[_Xs] = []
# This should generate an error.
y: _Xs = ()
# This should generate an error.
z: tuple[_Xs, ...]
# This should generate an error.
| ClassA |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 67342,
"end": 67478
} | class ____:
def __init__(self, cookie):
self.cookie = cookie
def get(self, name):
return self.cookie
| DummyCookies |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 668458,
"end": 669182
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for GitActor."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("GitActorEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of(GitActor), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| GitActorConnection |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum1.py | {
"start": 5728,
"end": 5945
} | class ____(Enum):
(A, B, C) = range(3)
te11_A = TestEnum11.A
reveal_type(te11_A, expected_text="Literal[TestEnum11.A]")
reveal_type(te11_A.value, expected_text="int")
def func3(self) -> None:
pass
| TestEnum11 |
python | ray-project__ray | python/ray/train/lightgbm/_lightgbm_utils.py | {
"start": 4744,
"end": 7543
} | class ____(RayReportCallback):
"""Creates a callback that reports metrics and checkpoints model.
Args:
metrics: Metrics to report. If this is a list,
each item should be a metric key reported by LightGBM,
and it will be reported to Ray Train/Tune under the same name.
This can also be a dict of {<key-to-report>: <lightgbm-metric-key>},
which can be used to rename LightGBM default metrics.
filename: Customize the saved checkpoint file type by passing
a filename. Defaults to "model.txt".
frequency: How often to save checkpoints, in terms of iterations.
Defaults to 0 (no checkpoints are saved during training).
checkpoint_at_end: Whether or not to save a checkpoint at the end of training.
results_postprocessing_fn: An optional Callable that takes in
the metrics dict that will be reported (after it has been flattened)
and returns a modified dict.
Examples
--------
Reporting checkpoints and metrics to Ray Tune when running many
independent LightGBM trials (without data parallelism within a trial).
.. testcode::
:skipif: True
import lightgbm
from ray.train.lightgbm import RayTrainReportCallback
config = {
# ...
"metric": ["binary_logloss", "binary_error"],
}
# Report only log loss to Tune after each validation epoch.
bst = lightgbm.train(
...,
callbacks=[
RayTrainReportCallback(
metrics={"loss": "eval-binary_logloss"}, frequency=1
)
],
)
Loading a model from a checkpoint reported by this callback.
.. testcode::
:skipif: True
from ray.train.lightgbm import RayTrainReportCallback
# Get a `Checkpoint` object that is saved by the callback during training.
result = trainer.fit()
booster = RayTrainReportCallback.get_model(result.checkpoint)
"""
@contextmanager
def _get_checkpoint(self, model: Booster) -> Optional[Checkpoint]:
if ray.train.get_context().get_world_rank() in (0, None):
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
model.save_model(Path(temp_checkpoint_dir, self._filename).as_posix())
yield Checkpoint.from_directory(temp_checkpoint_dir)
else:
yield None
def _save_and_report_checkpoint(self, report_dict: Dict, model: Booster):
with self._get_checkpoint(model=model) as checkpoint:
ray.train.report(report_dict, checkpoint=checkpoint)
def _report_metrics(self, report_dict: Dict):
ray.train.report(report_dict)
| RayTrainReportCallback |
python | urllib3__urllib3 | src/urllib3/fields.py | {
"start": 4542,
"end": 10829
} | class ____:
"""
A data container for request body parameters.
:param name:
The name of this request field. Must be unicode.
:param data:
The data/value body.
:param filename:
An optional filename of the request field. Must be unicode.
:param headers:
An optional dict-like object of headers to initially use for the field.
.. versionchanged:: 2.0.0
The ``header_formatter`` parameter is deprecated and will
be removed in urllib3 v2.1.0.
"""
def __init__(
self,
name: str,
data: _TYPE_FIELD_VALUE,
filename: str | None = None,
headers: typing.Mapping[str, str] | None = None,
header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None,
):
self._name = name
self._filename = filename
self.data = data
self.headers: dict[str, str | None] = {}
if headers:
self.headers = dict(headers)
if header_formatter is not None:
import warnings
warnings.warn(
"The 'header_formatter' parameter is deprecated and "
"will be removed in urllib3 v2.1.0.",
DeprecationWarning,
stacklevel=2,
)
self.header_formatter = header_formatter
else:
self.header_formatter = format_multipart_header_param
@classmethod
def from_tuples(
cls,
fieldname: str,
value: _TYPE_FIELD_VALUE_TUPLE,
header_formatter: typing.Callable[[str, _TYPE_FIELD_VALUE], str] | None = None,
) -> RequestField:
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
filename: str | None
content_type: str | None
data: _TYPE_FIELD_VALUE
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(
fieldname, data, filename=filename, header_formatter=header_formatter
)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name: str, value: _TYPE_FIELD_VALUE) -> str:
"""
Override this method to change how each multipart header
parameter is formatted. By default, this calls
:func:`format_multipart_header_param`.
:param name:
The name of the parameter, an ASCII-only ``str``.
:param value:
The value of the parameter, a ``str`` or UTF-8 encoded
``bytes``.
:meta public:
"""
return self.header_formatter(name, value)
def _render_parts(
self,
header_parts: (
dict[str, _TYPE_FIELD_VALUE | None]
| typing.Sequence[tuple[str, _TYPE_FIELD_VALUE | None]]
),
) -> str:
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
iterable: typing.Iterable[tuple[str, _TYPE_FIELD_VALUE | None]]
parts = []
if isinstance(header_parts, dict):
iterable = header_parts.items()
else:
iterable = header_parts
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return "; ".join(parts)
def render_headers(self) -> str:
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append(f"{sort_key}: {self.headers[sort_key]}")
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append(f"{header_name}: {header_value}")
lines.append("\r\n")
return "\r\n".join(lines)
def make_multipart(
self,
content_disposition: str | None = None,
content_type: str | None = None,
content_location: str | None = None,
) -> None:
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_disposition:
The 'Content-Disposition' of the request body. Defaults to 'form-data'
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
content_disposition = (content_disposition or "form-data") + "; ".join(
[
"",
self._render_parts(
(("name", self._name), ("filename", self._filename))
),
]
)
self.headers["Content-Disposition"] = content_disposition
self.headers["Content-Type"] = content_type
self.headers["Content-Location"] = content_location
| RequestField |
python | django__django | tests/admin_filters/tests.py | {
"start": 8533,
"end": 8656
} | class ____(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
| DecadeFilterBookAdminParameterEndsWith__In |
python | getsentry__sentry | tests/sentry/notifications/test_class_manager.py | {
"start": 266,
"end": 760
} | class ____(TestCase):
def tearDown(self) -> None:
manager.classes.pop("AnotherDummyNotification", None)
def test_register(self) -> None:
register()(AnotherDummyNotification)
assert get("AnotherDummyNotification") == AnotherDummyNotification
def test_duplicate_register(self) -> None:
register()(AnotherDummyNotification)
with pytest.raises(NotificationClassAlreadySetException):
register()(AnotherDummyNotification)
| ClassManagerTest |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 59557,
"end": 61428
} | class ____(DefinedFunction):
r"""
Calculate the unitary divisor function `\sigma_k^*(n)` for positive integer n
``udivisor_sigma(n, k)`` is equal to ``sum([x**k for x in udivisors(n)])``
If n's prime factorization is:
.. math ::
n = \prod_{i=1}^\omega p_i^{m_i},
then
.. math ::
\sigma_k^*(n) = \prod_{i=1}^\omega (1+ p_i^{m_ik}).
Parameters
==========
k : power of divisors in the sum
for k = 0, 1:
``udivisor_sigma(n, 0)`` is equal to ``udivisor_count(n)``
``udivisor_sigma(n, 1)`` is equal to ``sum(udivisors(n))``
Default for k is 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import udivisor_sigma
>>> udivisor_sigma(18, 0)
4
>>> udivisor_sigma(74, 1)
114
>>> udivisor_sigma(36, 3)
47450
>>> udivisor_sigma(111)
152
See Also
========
sympy.ntheory.factor_.divisor_count, totient, sympy.ntheory.factor_.divisors,
sympy.ntheory.factor_.udivisors, sympy.ntheory.factor_.udivisor_count, divisor_sigma,
sympy.ntheory.factor_.factorint
References
==========
.. [1] https://mathworld.wolfram.com/UnitaryDivisorFunction.html
"""
is_integer = True
is_positive = True
@classmethod
def eval(cls, n, k=S.One):
if n.is_integer is False:
raise TypeError("n should be an integer")
if n.is_positive is False:
raise ValueError("n should be a positive integer")
if k.is_integer is False:
raise TypeError("k should be an integer")
if k.is_nonnegative is False:
raise ValueError("k should be a nonnegative integer")
if n.is_prime is True:
return 1 + n**k
if n.is_Integer:
return Mul(*[1+p**(k*e) for p, e in factorint(n).items()])
| udivisor_sigma |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_tab.py | {
"start": 2703,
"end": 5948
} | class ____(util.MdCase):
"""Combine header slug with content tab."""
extension = ['pymdownx.blocks.tab', 'toc', 'pymdownx.blocks.details']
extension_configs = {
'pymdownx.blocks.tab': {
'slugify': slugify(case='lower'),
'combine_header_slug': True,
'alternate_style': True
}
}
def test_combine_header_slug(self):
"""Test that slugs are a combination of the header slug and the tab title."""
md = R"""
### Here is some text
/// tab | First Tab
content
///
### Another header
/// details | title
//// tab | Second Tab
content
////
///
"""
self.check_markdown(
md,
'''
<h3 id="here-is-some-text">Here is some text</h3>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="here-is-some-text-first-tab" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="here-is-some-text-first-tab">First Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
<h3 id="another-header">Another header</h3>
<details>
<summary>title</summary>
<div class="tabbed-set tabbed-alternate" data-tabs="2:1"><input checked="checked" id="another-header-second-tab" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="another-header-second-tab">Second Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
</details>
''', # noqa: E501
True
)
def test_no_header(self):
"""Test when there is no header."""
md = R"""
/// tab | A Tab
content
///
"""
self.check_markdown(
md,
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="a-tab" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="a-tab">A Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_header_after(self):
"""Test when header comes after."""
md = R"""
/// tab | A Tab
content
///
# Header
"""
self.check_markdown(
md,
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="a-tab" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="a-tab">A Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>content</p>
</div>
</div>
</div>
<h1 id="header">Header</h1>
''', # noqa: E501
True
)
| TestTabSlugsCombineHeader |
python | plotly__plotly.py | plotly/utils.py | {
"start": 1244,
"end": 3189
} | class ____(object):
"""
Helper class that wraps values of certain types and produces a custom
__repr__() that may be elided and is suitable for use during pretty
printing
"""
def __init__(self, v, threshold, indent):
self.v = v
self.indent = indent
self.threshold = threshold
@staticmethod
def is_wrappable(v):
numpy = get_module("numpy")
if isinstance(v, (list, tuple)) and len(v) > 0 and not isinstance(v[0], dict):
return True
elif numpy and isinstance(v, numpy.ndarray):
return True
elif isinstance(v, str):
return True
else:
return False
def __repr__(self):
numpy = get_module("numpy")
if isinstance(self.v, (list, tuple)):
# Handle lists/tuples
res = _list_repr_elided(
self.v, threshold=self.threshold, indent=self.indent
)
return res
elif numpy and isinstance(self.v, numpy.ndarray):
# Handle numpy arrays
# Get original print opts
orig_opts = numpy.get_printoptions()
# Set threshold to self.max_list_elements
numpy.set_printoptions(
**dict(orig_opts, threshold=self.threshold, edgeitems=3, linewidth=80)
)
res = self.v.__repr__()
# Add indent to all but the first line
res_lines = res.split("\n")
res = ("\n" + " " * self.indent).join(res_lines)
# Restore print opts
numpy.set_printoptions(**orig_opts)
return res
elif isinstance(self.v, str):
# Handle strings
if len(self.v) > 80:
return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"
else:
return self.v.__repr__()
else:
return self.v.__repr__()
| ElidedWrapper |
python | falconry__falcon | falcon/inspect.py | {
"start": 10862,
"end": 11525
} | class ____(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (list[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: list[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
| RouteInfo |
python | google__jax | jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_kernel.py | {
"start": 1618,
"end": 15144
} | class ____(NamedTuple):
"""SegmentIds for Q and KV sequences.
SegmentIds are a mechanism to ensure that there is no cross-attention between
segments (fraction of a sequence) that have been concatenated together into a
sequence. Each array is a list of ids (integers). Only tokens with the same
id are allowed to attend to each other.
The static mask (e.g. causal) is "and-ed" with the segment id mask to form
the actual attention mask. It is important that the latter does not have any
all-zero rows (along dimension kv). Otherwise it would result in a invalid
softmax (the denominator would be 0).
This condition holds for causal self-attention because in this case segment
ids form a block diagonal matrix so at least one element in each row is set.
It is easy to break this condition with non-self-attention configurations.
Attributes:
q: segment ids along the Q sequence
kv: segment ids along the KV sequence
"""
q: jax.Array # [q_seq_len]
kv: jax.Array # [kv_seq_len]
# Return type of SplashAttention function that implements the custom vjp rule.
SplashCustomReturnType = Union[
# out, no residuals
jax.Array,
# out, residuals:
tuple[jax.Array, tuple[jax.Array,]]
]
SplashResidualsType = tuple[
jax.Array, # q
jax.Array, # k
jax.Array, # v
Optional[SegmentIds], # segment_ids
Optional[jax.Array], # sinks
jax.Array, # out
jax.Array, # logsumexp
Optional[mask_info_lib.MaskInfo], # dq_mask_info
Optional[mask_info_lib.MaskInfo], # dkv_mask_info
]
MaskFunctionType = Callable[..., jax.Array]
def get_kernel_name(
block_metadata: Mapping[str, Any],
is_mqa: bool,
save_residuals: bool,
is_segmented: bool,
phase: str,
) -> str:
"""Returns a unique name for all SplashAttention kernel variants."""
assert phase == "dq" or phase == "dkv" or phase == "fwd"
# Saving residuals is supported only for the fwd phase.
assert not save_residuals or phase == "fwd"
residuals = ""
if save_residuals:
residuals = "_residuals"
elif phase == "fwd":
residuals = "_no_residuals"
attention_type = "mqa" if is_mqa else "mha"
segments = "_segmented" if is_segmented else ""
return f"splash_{attention_type}_{phase}{segments}{residuals}_" + "_".join(
f"{k}={v}" for k, v in sorted(block_metadata.items())
)
# Reference attention implementations
@overload
def _attention_reference(
mask: jax.Array,
q: jax.Array,
k: jax.Array,
v: jax.Array,
segment_ids: SegmentIds | None,
sinks: jax.Array | None,
save_residuals: Literal[False],
mask_value: float,
custom_type: str,
attn_logits_soft_cap: float | None,
) -> jax.Array:
...
@overload
def _attention_reference(
mask: jax.Array,
q: jax.Array,
k: jax.Array,
v: jax.Array,
segment_ids: SegmentIds | None,
sinks: jax.Array | None,
save_residuals: Literal[True],
mask_value: float,
custom_type: str,
attn_logits_soft_cap: float | None,
) -> tuple[jax.Array, tuple[jax.Array]]:
...
def _attention_reference(
mask: jax.Array, # [q_seq_len, kv_seq_len]
q: jax.Array, # [q_seq_len, head_dim]
k: jax.Array, # [kv_seq_len, head_dim]
v: jax.Array, # [kv_seq_len, head_dim]
segment_ids: SegmentIds | None,
sinks: jax.Array | None,
mask_value: float,
save_residuals: bool,
custom_type: str,
attn_logits_soft_cap: float | None,
):
return _attention_reference_default( # pytype: disable=bad-return-type
mask,
q,
k,
v,
segment_ids,
sinks,
mask_value,
save_residuals,
custom_type,
attn_logits_soft_cap,
)
def _attention_reference_default(
mask: jax.Array, # [q_seq_len, kv_seq_len]
q: jax.Array, # [q_seq_len, head_dim]
k: jax.Array, # [kv_seq_len, head_dim]
v: jax.Array, # [kv_seq_len, head_dim]
segment_ids: SegmentIds | None,
sinks: jax.Array | None, # [] one scalar per qhead
mask_value: float,
save_residuals: bool,
custom_type: str,
attn_logits_soft_cap: float | None,
):
del custom_type
logits = jnp.einsum("sd,td->st", q.astype(jnp.float32), k.astype(jnp.float32))
if segment_ids is not None:
mask = jnp.logical_and(
mask, segment_ids.q[:, None] == segment_ids.kv[None, :]
)
if attn_logits_soft_cap is not None:
logits = jnp.tanh(logits / attn_logits_soft_cap)
logits = logits * attn_logits_soft_cap
logits = jnp.where(mask, logits, mask_value)
m = logits.max(axis=-1)
sinks = None if sinks is None else sinks.astype(logits.dtype)
m = m if sinks is None else jnp.maximum(m, sinks)
s = jnp.exp(logits - m[..., None])
l = s.sum(axis=-1) + (0 if sinks is None else jnp.exp(sinks - m))
s = s / l[..., None]
o = jnp.einsum("st,td->sd", s, v.astype(jnp.float32))
logsumexp = m + jnp.log(l)
if save_residuals:
return o, (logsumexp,)
return o
def attention_reference(
mask: jax.Array, # [q_seq_len, kv_seq_len]
q: jax.Array, # [q_seq_len, head_dim]
k: jax.Array, # [kv_seq_len, head_dim]
v: jax.Array, # [kv_seq_len, head_dim]
segment_ids: SegmentIds | None,
sinks: jax.Array | None = None,
*,
mask_value: float = DEFAULT_MASK_VALUE,
save_residuals: bool = False,
custom_type: str = "flash",
attn_logits_soft_cap: float | None = None,
) -> SplashCustomReturnType:
return _attention_reference( # pytype: disable=wrong-arg-types
mask,
q,
k,
v,
segment_ids,
sinks,
mask_value=mask_value,
save_residuals=save_residuals,
custom_type=custom_type,
attn_logits_soft_cap=attn_logits_soft_cap,
)
def _attention_reference_custom_fwd(
mask: jax.Array, # [q_seq_len, kv_seq_len]
q: jax.Array, # [q_seq_len, head_dim]
k: jax.Array, # [kv_seq_len, head_dim]
v: jax.Array, # [kv_seq_len, head_dim]
segment_ids: SegmentIds | None,
sinks: jax.Array | None,
mask_value: float,
save_residuals: bool,
custom_type: str,
attn_logits_soft_cap: float | None,
):
if save_residuals:
raise NotImplementedError("Higher-order AD not supported.")
o, (logsumexp,) = _attention_reference(
mask,
q,
k,
v,
segment_ids,
sinks,
mask_value=mask_value,
save_residuals=True,
custom_type=custom_type,
attn_logits_soft_cap=attn_logits_soft_cap,
)
return o, (mask, q, k, v, segment_ids, sinks, o, logsumexp)
def _attention_reference_custom_bwd(
mask_value: float,
save_residuals: bool,
custom_type: str,
attn_logits_soft_cap: float | None,
res,
do: jax.Array,
) -> tuple[None, jax.Array, jax.Array, jax.Array, None, jax.Array | None]:
del save_residuals
mask, q, k, v, segment_ids, sinks, o, logsumexp = res
uncapped_logits = jnp.einsum(
"qc,kc->qk", q, k, preferred_element_type=jnp.float32)
if attn_logits_soft_cap is not None:
logits = jnp.tanh(uncapped_logits / attn_logits_soft_cap)
logits = logits * attn_logits_soft_cap
else:
logits = uncapped_logits
if segment_ids is not None:
mask = jnp.logical_and(
mask, segment_ids.q[:, None] == segment_ids.kv[None, :]
)
logits = jnp.where(mask, logits, mask_value)
p = jnp.exp(logits - logsumexp[..., None])
do = do.astype(jnp.float32) # pytype: disable=attribute-error
dv = jnp.einsum("pt,pd->td", p, do).astype(v.dtype)
dp = jnp.einsum("pd,td->pt", do, v.astype(jnp.float32))
# These two ways of computing ds are mathematically equivalent. The first
# involves reducing over the head_dim dimension and the second involves
# reducing over a sequence dimension. They tend to produce slightly different
# numerics.
if custom_type == "flash":
di = jnp.sum(o.astype(jnp.float32) * do, axis=-1)[..., None]
else:
di = jnp.einsum("st,st->s", dp, p)[:, None]
ds = (dp - di) * p
if attn_logits_soft_cap is not None:
normalized = uncapped_logits / attn_logits_soft_cap
d = jnp.tanh(normalized)
g = ds * (1 - d)
ds = g + g * d
dk = jnp.einsum("sd,st->td", q.astype(jnp.float32), ds).astype(k.dtype)
dq = jnp.einsum("st,td->sd", ds, k.astype(jnp.float32)).astype(q.dtype)
dsinks = None
if sinks is not None: # the gradient is ``sum(-exp(s) / exp(lse) * o * do)``
sinks_exp = -jnp.exp(sinks[..., None, None].astype(jnp.float32)
- logsumexp[..., None].astype(jnp.float32))
dsinks = jnp.sum(sinks_exp.astype(o.dtype) * do * o)
return None, dq, dk, dv, None, dsinks
_attention_reference_custom = jax.custom_vjp(
_attention_reference, nondiff_argnames=(
"mask_value", "save_residuals", "custom_type", "attn_logits_soft_cap")
)
_attention_reference_custom.defvjp(_attention_reference_custom_fwd,
_attention_reference_custom_bwd)
def attention_reference_custom(
mask: jax.Array, # [q_seq_len, kv_seq_len]
q: jax.Array, # [q_seq_len, head_dim]
k: jax.Array, # [kv_seq_len, head_dim]
v: jax.Array, # [kv_seq_len, head_dim]
segment_ids: SegmentIds | None,
sinks: jax.Array | None = None,
*,
mask_value: float = DEFAULT_MASK_VALUE,
save_residuals: bool = False,
custom_type: str = "flash",
attn_logits_soft_cap: float | None = None,
):
return _attention_reference_custom(
mask,
q,
k,
v,
segment_ids,
sinks,
mask_value,
save_residuals,
custom_type=custom_type,
attn_logits_soft_cap=attn_logits_soft_cap,
)
def make_attention_reference(
mask: mask_lib.Mask | np.ndarray,
is_mqa: bool,
backward_impl: str = "vanilla",
**params: Any,
) -> Callable:
@partial(
jax.jit,
static_argnames=[
"mask_value",
"save_residuals",
"attn_logits_soft_cap",
],
)
def _wrapped(
mask: jax.Array,
q: jax.Array,
k: jax.Array,
v: jax.Array,
segment_ids: SegmentIds | None = None,
sinks: jax.Array | None = None,
*,
mask_value: float = DEFAULT_MASK_VALUE,
save_residuals: bool = False,
attn_logits_soft_cap: float | None = None,
):
if backward_impl == "custom":
attn_impl = partial(
attention_reference_custom, custom_type="flash",
)
elif backward_impl == "custom_vanilla":
attn_impl = partial(
attention_reference_custom, custom_type="vanilla",
)
else:
attn_impl = attention_reference
func = partial(
attn_impl,
mask_value=mask_value,
save_residuals=save_residuals,
attn_logits_soft_cap=attn_logits_soft_cap,
**params,
)
if is_mqa:
func = jax.vmap(func, in_axes=(0, 0, None, None, None, 0))
is_grouped = False
else:
# In grouped attention (1 < num_kv_heads && num_kv_heads < num_q_heads).
# We interleave the KV heads across the Q heads.
# For example: for 8 Q heads and 4 KV heads:
# Q head [0, 1] see KV head 0
# Q head [2, 3] see KV head 1
# Q head [4, 5] see KV head 2
# Q head [6, 7] see KV head 3
#
# The following implementation reshapes Q to expose KV heads and vmaps
# Across the Q heads so it is similar to MQA.
# Alternatively we can replicate K/V to match Q like so:
# k = jnp.repeat(k, q_heads_per_kv_head, axis=0)
# v = jnp.repeat(v, q_heads_per_kv_head, axis=0)
kv_heads = k.shape[0]
assert kv_heads == v.shape[0]
q_heads, q_seq_len, head_dim = q.shape
is_grouped = kv_heads < q_heads
if is_grouped:
assert q_heads % kv_heads == 0
assert mask.shape[0] == q_heads
q_heads_per_kv_head = q_heads // kv_heads
q = q.reshape((kv_heads, q_heads_per_kv_head, q_seq_len, head_dim))
mask = mask.reshape((kv_heads, q_heads_per_kv_head, *mask.shape[1:]))
if sinks is not None:
sinks = sinks.reshape((kv_heads, q_heads_per_kv_head))
# Inner-most vmap: iterate over the q heads.
func = jax.vmap(func, in_axes=(0, 0, None, None, None, 0))
# Outer-most vmap: iterate over the kv heads.
func = jax.vmap(func, in_axes=(0, 0, 0, 0, None, 0))
out = func(mask, q, k, v, segment_ids, sinks)
if is_grouped:
def reshape_activations(activations):
if activations.ndim == 4: # pytype: disable=attribute-error
kv_heads, q_heads_per_kv_head, q_seq_len, head_dim = activations.shape # pytype: disable=attribute-error
return activations.reshape( # pytype: disable=attribute-error
kv_heads * q_heads_per_kv_head, q_seq_len, head_dim
)
return activations
def reshape_residuals(residuals):
if residuals.ndim == 3:
kv_heads, q_heads_per_kv_head, q_seq_len = residuals.shape
return residuals.reshape(kv_heads * q_heads_per_kv_head, q_seq_len)
return residuals
if save_residuals:
assert isinstance(out, tuple)
assert isinstance(out[1], tuple)
return (reshape_activations(out[0]), (reshape_residuals(out[1][0]),))
else:
return reshape_activations(out)
else:
return out
return functools.partial(_wrapped, jnp.array(mask[:, :, :]))
make_masked_mha_reference = partial(make_attention_reference, is_mqa=False)
make_masked_mqa_reference = partial(make_attention_reference, is_mqa=True)
# Splash attention implementation
# We use an IntEnum to make it JSON serializable as regen metadata.
| SegmentIds |
python | run-llama__llama_index | llama-index-utils/llama-index-utils-qianfan/llama_index/utils/qianfan/apis.py | {
"start": 925,
"end": 2591
} | class ____(BaseModel):
"""
Response for Querying the List of Model Serving.
"""
result: ServiceListResult
"""All model available service items."""
def get_service_list(
access_key: str, secret_key: str, api_type_filter: Sequence[APIType] = []
):
"""
Get a list of available model services. Can be filtered by api type.
"""
url = "https://qianfan.baidubce.com/wenxinworkshop/service/list"
json = {"apiTypefilter": api_type_filter}
client = Client(access_key, secret_key)
resp_dict = client.post(url, json=json)
resp = ServiceListResponse(**resp_dict)
common_services = filter(
lambda service: service.charge_status == "OPENED", resp.result.common
)
custom_services = filter(
lambda service: service.charge_status == "OPENED", resp.result.custom
)
return list(common_services) + list(custom_services)
async def aget_service_list(
access_key: str, secret_key: str, api_type_filter: Sequence[APIType] = []
):
"""
Asynchronous get a list of available model services. Can be filtered by api type.
"""
url = "https://qianfan.baidubce.com/wenxinworkshop/service/list"
json = {"apiTypefilter": api_type_filter}
client = Client(access_key, secret_key)
resp_dict = await client.apost(url, json=json)
resp = ServiceListResponse(**resp_dict)
common_services = filter(
lambda service: service.charge_status == "OPENED", resp.result.common
)
custom_services = filter(
lambda service: service.charge_status == "OPENED", resp.result.custom
)
return list(common_services) + list(custom_services)
| ServiceListResponse |
python | redis__redis-py | tests/test_maint_notifications.py | {
"start": 31234,
"end": 31731
} | class ____:
"""Test the EndpointType class functionality."""
def test_endpoint_type_constants(self):
"""Test that the EndpointType constants are correct."""
assert EndpointType.INTERNAL_IP.value == "internal-ip"
assert EndpointType.INTERNAL_FQDN.value == "internal-fqdn"
assert EndpointType.EXTERNAL_IP.value == "external-ip"
assert EndpointType.EXTERNAL_FQDN.value == "external-fqdn"
assert EndpointType.NONE.value == "none"
| TestEndpointType |
python | rapidsai__cudf | python/cudf/cudf/core/udf/row_function.py | {
"start": 2995,
"end": 5363
} | class ____(ApplyKernelBase):
"""
Class representing a kernel that computes the result of
a DataFrame.apply operation. Expects that the user passed
a function that operates on an input row of the dataframe,
for example
def f(row):
return row['x'] + row['y']
"""
@property
def kernel_type(self):
return "dataframe_apply"
def _get_frame_type(self):
return _get_frame_row_type(
np.dtype(list(_all_dtypes_from_frame(self.frame).items()))
)
def _get_kernel_string(self):
row_type = self._get_frame_type()
# Create argument list for kernel
frame = _supported_cols_from_frame(self.frame)
input_columns = ", ".join(
[f"input_col_{i}" for i in range(len(frame))]
)
input_offsets = ", ".join([f"offset_{i}" for i in range(len(frame))])
extra_args = ", ".join(
[f"extra_arg_{i}" for i in range(len(self.args))]
)
# Generate the initializers for each device function argument
initializers = []
row_initializers = []
for i, (colname, col) in enumerate(frame.items()):
idx = str(i)
template = (
masked_input_initializer_template
if col.mask is not None
else unmasked_input_initializer_template
)
initializers.append(template.format(idx=idx))
row_initializers.append(
row_initializer_template.format(idx=idx, name=colname)
)
return row_kernel_template.format(
input_columns=input_columns,
input_offsets=input_offsets,
extra_args=extra_args,
masked_input_initializers="\n".join(initializers),
row_initializers="\n".join(row_initializers),
numba_rectype=row_type,
)
@cache
def _get_kernel_string_exec_context(self):
# This is the global execution context that will be used
# to compile the kernel. It contains the function being
# compiled and the cuda module.
row_type = self._get_frame_type()
return {
"cuda": cuda,
"Masked": Masked,
"_mask_get": _mask_get,
"pack_return": pack_return,
"row_type": row_type,
}
| DataFrameApplyKernel |
python | cython__cython | Cython/Compiler/TypeSlots.py | {
"start": 19767,
"end": 20351
} | class ____(SyntheticSlot):
def __init__(self, signature, slot_name, left_method, method_name_to_slot, **kargs):
assert left_method.startswith('__')
right_method = '__r' + left_method[2:]
SyntheticSlot.__init__(
self, slot_name, [left_method, right_method], "0", is_binop=True, **kargs)
# MethodSlot causes special method registration.
self.left_slot = MethodSlot(signature, "", left_method, method_name_to_slot, **kargs)
self.right_slot = MethodSlot(signature, "", right_method, method_name_to_slot, **kargs)
| BinopSlot |
python | PyCQA__pycodestyle | tests/test_blank_lines.py | {
"start": 10158,
"end": 10575
} | class ____(object):
pass
""")
self.assertEqual([
'E303:7:1', # some_function
'E303:18:1', # SomeVeryFarClass
], result)
def test_the_right_blanks(self):
"""
It will accept 3 blank for top level and 2 for nested.
"""
result = errors_from_src("""
def some_function():
pass
# With comments.
some_other = code_here
| AFarEnoughClass |
python | django__django | tests/model_meta/tests.py | {
"start": 1422,
"end": 1843
} | class ____(OptionsBaseTests):
def test_get_fields_is_immutable(self):
msg = IMMUTABLE_WARNING % "get_fields()"
for _ in range(2):
# Running unit test twice to ensure both non-cached and cached
# result are immutable.
fields = Person._meta.get_fields()
with self.assertRaisesMessage(AttributeError, msg):
fields += ["errors"]
| GetFieldsTests |
python | vyperlang__vyper | vyper/venom/check_venom.py | {
"start": 1779,
"end": 6227
} | class ____(VenomError):
message: str = "multi-output assignment only supported for invoke"
def __init__(self, caller: IRFunction, inst: IRInstruction):
self.caller = caller
self.inst = inst
def __str__(self):
bb = self.inst.parent
return f"multi-output on non-invoke in {self.caller.name}:\n" f" {self.inst}\n\n{bb}"
def _handle_var_definition(
fn: IRFunction, bb: IRBasicBlock, var_def: VarDefinition
) -> list[VenomError]:
errors: list[VenomError] = []
for inst in bb.instructions:
if inst.opcode == "phi":
for label, op in inst.phi_operands:
defined = var_def.defined_vars_bb[fn.get_basic_block(label.name)]
if op not in defined:
errors.append(VarNotDefined(var=op, inst=inst))
continue
defined = var_def.defined_vars[inst]
for op in inst.operands:
if isinstance(op, IRVariable):
if op not in defined:
errors.append(VarNotDefined(var=op, inst=inst))
return errors
def find_semantic_errors_fn(fn: IRFunction) -> list[VenomError]:
errors: list[VenomError] = []
# check that all the bbs are terminated
for bb in fn.get_basic_blocks():
if not bb.is_terminated:
errors.append(BasicBlockNotTerminated(basicblock=bb))
if len(errors) > 0:
return errors
ac = IRAnalysesCache(fn)
var_def: VarDefinition = ac.request_analysis(VarDefinition)
for bb in fn.get_basic_blocks():
e = _handle_var_definition(fn, bb, var_def)
errors.extend(e)
return errors
def _collect_ret_arities(context: IRContext) -> dict[IRFunction, set[int]]:
ret_arities: dict[IRFunction, set[int]] = {}
for fn in context.functions.values():
arities: set[int] = set()
for bb in fn.get_basic_blocks():
for inst in bb.instructions:
if inst.opcode == "ret":
# last operand is return PC; all preceding (if any) are return values
arities.add(len(inst.operands) - 1)
ret_arities[fn] = arities
return ret_arities
def find_calling_convention_errors(context: IRContext) -> list[VenomError]:
errors: list[VenomError] = []
# Enforce invoke binding exactly callee arity
ret_arities = _collect_ret_arities(context)
for fn, arities in ret_arities.items():
if len(arities) > 1:
errors.append(InconsistentReturnArity(fn, arities))
for caller in context.functions.values():
for bb in caller.get_basic_blocks():
for inst in bb.instructions:
# Disallow multi-output except on invoke
got_num = inst.num_outputs
if got_num > 1 and inst.opcode != "invoke":
errors.append(MultiOutputNonInvoke(caller, inst))
continue
if inst.opcode != "invoke":
continue
target = inst.operands[0]
assert isinstance(target, IRLabel)
callee = context.get_function(target)
arities = ret_arities[callee]
if len(arities) == 0:
expected_num = 0
elif len(arities) == 1:
expected_num = next(iter(arities))
else:
# a function with InconsistentReturnArity, we already
# checked this above
continue
if got_num != expected_num:
errors.append(InvokeArityMismatch(caller, inst, expected_num, got_num))
return errors
def find_semantic_errors(context: IRContext) -> list[VenomError]:
errors: list[VenomError] = []
# Per-function basic checks (var definitions, bb termination, etc.)
for fn in context.functions.values():
errors.extend(find_semantic_errors_fn(fn))
# Calling convention errors can be reported too if desired
errors.extend(find_calling_convention_errors(context))
return errors
def check_venom_ctx(context: IRContext):
errors = find_semantic_errors(context)
if errors:
raise ExceptionGroup("venom semantic errors", errors)
def check_calling_convention(context: IRContext):
errors = find_calling_convention_errors(context)
if errors:
raise ExceptionGroup("venom calling convention errors", errors)
| MultiOutputNonInvoke |
python | getsentry__sentry | tests/sentry/integrations/utils/test_scope.py | {
"start": 2300,
"end": 5134
} | class ____(TestCase):
@patch("sentry.integrations.utils.scope.bind_organization_context")
def test_binds_org_context_with_single_org(self, mock_bind_org_context: MagicMock) -> None:
org = self.create_organization(slug="dogsaregreat")
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(name="squirrelChasers")
integration.add_organization(org)
bind_org_context_from_integration(integration.id)
with assume_test_silo_mode(SiloMode.REGION):
mock_bind_org_context.assert_called_with(serialize_rpc_organization(org))
@patch("sentry.integrations.utils.scope.bind_ambiguous_org_context")
def test_binds_org_context_with_multiple_orgs(
self, mock_bind_ambiguous_org_context: MagicMock
) -> None:
maisey_org = self.create_organization(slug="themaiseymaiseydog")
charlie_org = self.create_organization(slug="charliebear")
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(name="squirrelChasers")
integration.add_organization(maisey_org)
integration.add_organization(charlie_org)
bind_org_context_from_integration(integration.id)
call_orgs, call_source = mock_bind_ambiguous_org_context.call_args[0]
# Prevent flakiness from random ordering of the org slugs
self.assertListEqual(sorted(call_orgs), sorted([maisey_org.slug, charlie_org.slug]))
assert call_source == f"integration (id={integration.id})"
@patch("sentry.integrations.utils.scope.bind_ambiguous_org_context")
@patch("sentry.integrations.utils.scope.bind_organization_context")
@patch("sentry.integrations.utils.scope.check_tag_for_scope_bleed")
@patch("sentry.integrations.utils.scope.logger.warning")
def test_logs_warning_if_no_orgs_found(
self,
mock_logger_warning: MagicMock,
mock_check_tag_for_scope_bleed: MagicMock,
mock_bind_org_context: MagicMock,
mock_bind_ambiguous_org_context: MagicMock,
) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(name="squirrelChasers")
bind_org_context_from_integration(integration.id, {"webhook": "issue_updated"})
mock_logger_warning.assert_called_with(
"Can't bind org context - no orgs are associated with integration id=%s.",
integration.id,
extra={"webhook": "issue_updated"},
)
mock_check_tag_for_scope_bleed.assert_called_with(
"integration_id", integration.id, add_to_scope=False
)
mock_bind_org_context.assert_not_called()
mock_bind_ambiguous_org_context.assert_not_called()
| BindOrgContextFromIntegrationTest |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 116476,
"end": 122514
} | class ____(Element):
"""
MIVOT Block holder:
Processing VO model views on data is out of the scope of Astropy.
This is why the only VOmodel-related feature implemented here the
extraction or the writing of a mapping block from/to a VOTable
There is no syntax validation other than the allowed tag names.
The mapping block is handled as a correctly indented XML string
which is meant to be parsed by the calling API (e.g., PyVO).
The constructor takes "content" as a parameter, it is the string
serialization of the MIVOT block.
If it is None, the instance is meant to be set by the Resource parser.
Orherwise, the parameter value is parsed to make sure it matches
the MIVOT XML structure.
"""
def __init__(self, content=None):
if content is not None:
self._content = content.strip()
self.check_content_format()
else:
self._content = ""
self._indent_level = 0
self._on_error = False
def __str__(self):
return self._content
def _add_statement(self, start, tag, data, config, pos):
"""
Convert the tag as a string and append it to the mapping
block string with the correct indentation level.
The signature is the same as for all _add_* methods of the parser.
"""
if self._on_error is True:
return
# The first mapping tag (<VODML>) is consumed by the host RESOURCE
# To check that the content is a mapping block. This cannot be done here
# because that RESOURCE might have another content
if self._content == "":
self._content = '<VODML xmlns="http://www.ivoa.net/xml/mivot">\n'
self._indent_level += 1
ele_content = ""
if start:
element = "<" + tag
for k, v in data.items():
element += f" {k}='{v}'"
element += ">\n"
else:
if data:
ele_content = f"{data}\n"
element = f"</{tag}>\n"
if start is False:
self._indent_level -= 1
# The content is formatted on the fly: not mandatory but cool for debugging
indent = "".join(" " for _ in range(2 * self._indent_level))
if ele_content:
self._content += indent + " " + ele_content
self._content += indent + element
if start is True:
self._indent_level += 1
def _unknown_mapping_tag(self, start, tag, data, config, pos):
"""
In case of unexpected tag, the parsing stops and the mapping block
is set with a REPORT tag telling what went wrong.
The signature si that same as for all _add_* methods of the parser.
"""
self._content = (
f'<VODML xmlns="http://www.ivoa.net/xml/mivot">\n '
f'<REPORT status="KO">Unknown mivot block statement: {tag}</REPORT>\n</VODML>'
)
self._on_error = True
warn_or_raise(W10, W10, tag, config, pos=pos)
@property
def content(self):
"""
The XML mapping block serialized as string.
If there is not mapping block, an empty block is returned in order to
prevent client code to deal with None blocks.
"""
if self._content == "":
self._content = (
'<VODML xmlns="http://www.ivoa.net/xml/mivot">\n '
'<REPORT status="KO">No Mivot block</REPORT>\n</VODML>\n'
)
return self._content
def parse(self, votable, iterator, config):
"""
Regular parser similar to others VOTable components.
"""
model_mapping_mapping = {
"VODML": self._add_statement,
"GLOBALS": self._add_statement,
"REPORT": self._add_statement,
"MODEL": self._add_statement,
"TEMPLATES": self._add_statement,
"COLLECTION": self._add_statement,
"INSTANCE": self._add_statement,
"ATTRIBUTE": self._add_statement,
"REFERENCE": self._add_statement,
"JOIN": self._add_statement,
"WHERE": self._add_statement,
"PRIMARY_KEY": self._add_statement,
"FOREIGN_KEY": self._add_statement,
}
for start, tag, data, pos in iterator:
model_mapping_mapping.get(tag, self._unknown_mapping_tag)(
start, tag, data, config, pos
)
if start is False and tag == "VODML":
break
return self
def to_xml(self, w):
"""
Tells the writer to insert the MIVOT block in its output stream.
"""
w.string_element(self._content)
def check_content_format(self):
"""
Check if the content is on xml format by building a VOTable,
putting a MIVOT block in the first resource and trying to parse the VOTable.
"""
if not self._content.startswith("<"):
vo_raise(E26)
in_memory_votable = VOTableFile()
mivot_resource = Resource()
mivot_resource.type = "meta"
mivot_resource.mivot_block = self
# pack the meta resource in a top level resource
result_resource = Resource()
result_resource.type = "results"
result_resource.resources.append(mivot_resource)
data_table = TableElement(in_memory_votable)
data_table.name = "t1"
result_resource.tables.append(data_table)
in_memory_votable.resources.append(result_resource)
# Push the VOTable in an IOSTream (emulates a disk saving)
buff = io.BytesIO()
in_memory_votable.to_xml(buff)
# Read the IOStream (emulates a disk readout)
buff.seek(0)
config = {}
with iterparser.get_xml_iterator(
buff, _debug_python_based_parser=None
) as iterator:
return VOTableFile(config=config, pos=(1, 1)).parse(iterator, config)
@deprecated("6.0", alternative="TableElement")
| MivotBlock |
python | kubernetes-client__python | kubernetes/client/models/v1_replica_set_list.py | {
"start": 383,
"end": 7035
} | class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type string; drives the recursion in to_dict().
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1ReplicaSet]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1ReplicaSetList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # `items` is the only required field: it is assigned unconditionally so
        # its setter can raise when client-side validation is enabled.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1ReplicaSetList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1ReplicaSetList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1ReplicaSetList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1ReplicaSetList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1ReplicaSetList.  # noqa: E501

        List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset  # noqa: E501

        :return: The items of this V1ReplicaSetList.  # noqa: E501
        :rtype: list[V1ReplicaSet]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1ReplicaSetList.

        List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset  # noqa: E501

        :param items: The items of this V1ReplicaSetList.  # noqa: E501
        :type: list[V1ReplicaSet]
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1ReplicaSetList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1ReplicaSetList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1ReplicaSetList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1ReplicaSetList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1ReplicaSetList.  # noqa: E501


        :return: The metadata of this V1ReplicaSetList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1ReplicaSetList.


        :param metadata: The metadata of this V1ReplicaSetList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models: lists and dicts of models are
        # converted element-by-element; anything with to_dict() is delegated.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ReplicaSetList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1ReplicaSetList):
            return True

        return self.to_dict() != other.to_dict()
| V1ReplicaSetList |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataflow.py | {
"start": 6530,
"end": 11342
} | class ____(BaseTrigger):
"""
Trigger that monitors if a Dataflow job has reached any of the expected statuses.
:param job_id: Required. ID of the job.
:param expected_statuses: The expected state(s) of the operation.
See: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState
:param project_id: Required. The Google Cloud project ID in which the job was started.
:param location: Optional. The location where the job is executed. If set to None then
the value of DEFAULT_DATAFLOW_LOCATION will be used.
:param gcp_conn_id: The connection ID to use for connecting to Google Cloud.
:param poll_sleep: Time (seconds) to wait between two consecutive calls to check the job.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
job_id: str,
expected_statuses: set[str],
project_id: str | None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.job_id = job_id
self.expected_statuses = expected_statuses
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize class arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataflow.DataflowJobStatusTrigger",
{
"job_id": self.job_id,
"expected_statuses": self.expected_statuses,
"project_id": self.project_id,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"poll_sleep": self.poll_sleep,
"impersonation_chain": self.impersonation_chain,
},
)
async def run(self):
"""
Loop until the job reaches an expected or terminal state.
Yields a TriggerEvent with success status, if the client returns an expected job status.
Yields a TriggerEvent with error status, if the client returns an unexpected terminal
job status or any exception is raised while looping.
In any other case the Trigger will wait for a specified amount of time
stored in self.poll_sleep variable.
"""
try:
while True:
job_status = await self.async_hook.get_job_status(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
if job_status.name in self.expected_statuses:
yield TriggerEvent(
{
"status": "success",
"message": f"Job with id '{self.job_id}' has reached an expected state: {job_status.name}",
}
)
return
elif job_status.name in DataflowJobStatus.TERMINAL_STATES:
yield TriggerEvent(
{
"status": "error",
"message": f"Job with id '{self.job_id}' is already in terminal state: {job_status.name}",
}
)
return
self.log.info("Sleeping for %s seconds.", self.poll_sleep)
await asyncio.sleep(self.poll_sleep)
except Exception as e:
self.log.error("Exception occurred while checking for job status!")
yield TriggerEvent(
{
"status": "error",
"message": str(e),
}
)
@cached_property
def async_hook(self) -> AsyncDataflowHook:
return AsyncDataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
)
| DataflowJobStatusTrigger |
python | Lightning-AI__lightning | src/lightning/pytorch/strategies/ddp.py | {
"start": 19259,
"end": 20037
} | class ____(_ForwardRedirection):
@override
def on_after_inner_forward(self, wrapper_module: Module, original_module: "pl.LightningModule") -> None:
# In manual_optimization, we need to prevent DDP reducer as
# it is done manually in `LightningModule.manual_backward`
if isinstance(wrapper_module, DistributedDataParallel) and not original_module.automatic_optimization:
wrapper_module.require_backward_grad_sync = False
@override
def on_after_outer_forward(self, wrapper_module: Module, original_module: "pl.LightningModule") -> None:
if isinstance(wrapper_module, DistributedDataParallel) and not original_module.automatic_optimization:
wrapper_module.require_backward_grad_sync = True
| _DDPForwardRedirection |
python | PrefectHQ__prefect | src/prefect/settings/models/testing.py | {
"start": 225,
"end": 1820
} | class ____(PrefectBaseSettings):
    # Settings under the `testing` section.  Each field accepts both the new
    # `PREFECT_TESTING_*` environment variable and a legacy `PREFECT_TEST_*`
    # alias (kept for backwards compatibility via AliasChoices below).
    model_config: ClassVar[SettingsConfigDict] = build_settings_config(("testing",))

    test_mode: bool = Field(
        default=False,
        description="If `True`, places the API in test mode. This may modify behavior to facilitate testing.",
        validation_alias=AliasChoices(
            AliasPath("test_mode"),
            "prefect_testing_test_mode",
            "prefect_test_mode",
        ),
    )

    unit_test_mode: bool = Field(
        default=False,
        description="This setting only exists to facilitate unit testing. If `True`, code is executing in a unit test context. Defaults to `False`.",
        validation_alias=AliasChoices(
            AliasPath("unit_test_mode"),
            "prefect_testing_unit_test_mode",
            "prefect_unit_test_mode",
        ),
    )

    unit_test_loop_debug: bool = Field(
        default=True,
        description="If `True` turns on debug mode for the unit testing event loop.",
        validation_alias=AliasChoices(
            AliasPath("unit_test_loop_debug"),
            "prefect_testing_unit_test_loop_debug",
            "prefect_unit_test_loop_debug",
        ),
    )

    # NOTE(review): the description says this returns None outside test mode;
    # that behavior is implemented elsewhere, not in this declaration — confirm.
    test_setting: Optional[Any] = Field(
        default="FOO",
        description="This setting only exists to facilitate unit testing. If in test mode, this setting will return its value. Otherwise, it returns `None`.",
        validation_alias=AliasChoices(
            AliasPath("test_setting"),
            "prefect_testing_test_setting",
            "prefect_test_setting",
        ),
    )
| TestingSettings |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 3630,
"end": 36420
} | class ____:
# Since we have a custom metaclass, lets double check the behaviour of
# passing args in the wrong way (args etc)
def test_kwargs_only(self):
with pytest.raises(TypeError, match="keyword arguments"):
BaseOperator("task_id")
def test_missing_kwarg(self):
with pytest.raises(TypeError, match="missing keyword argument"):
FakeOperator(task_id="task_id")
def test_missing_kwargs(self):
with pytest.raises(TypeError, match="missing keyword arguments"):
FakeSubClass(task_id="task_id")
def test_baseoperator_raises_exception_when_task_id_plus_taskgroup_id_exceeds_250_chars(self):
with DAG(dag_id="foo"), TaskGroup("A"):
with pytest.raises(ValueError, match="The key has to be less than 250 characters"):
BaseOperator(task_id="1" * 249)
def test_baseoperator_with_task_id_and_taskgroup_id_less_than_250_chars(self):
with DAG(dag_id="foo", schedule=None), TaskGroup("A" * 10):
BaseOperator(task_id="1" * 239)
def test_baseoperator_with_task_id_less_than_250_chars(self):
"""Test exception is not raised when operator task id < 250 chars."""
with DAG(dag_id="foo"):
op = BaseOperator(task_id="1" * 249)
assert op.task_id == "1" * 249
def test_task_naive_datetime(self):
naive_datetime = DEFAULT_DATE.replace(tzinfo=None)
op_no_dag = BaseOperator(
task_id="test_task_naive_datetime", start_date=naive_datetime, end_date=naive_datetime
)
assert op_no_dag.start_date.tzinfo
assert op_no_dag.end_date.tzinfo
def test_hash(self):
"""Two operators created equally should hash equaylly"""
# Include a "non-hashable" type too
assert hash(MockOperator(task_id="one", retries=1024 * 1024, arg1="abcef", params={"a": 1})) == hash(
MockOperator(task_id="one", retries=1024 * 1024, arg1="abcef", params={"a": 2})
)
def test_expand(self):
op = FakeOperator(test_param=True)
assert op.test_param
with pytest.raises(TypeError, match="missing keyword argument 'test_param'"):
FakeSubClass(test_sub_param=True)
def test_default_args(self):
default_args = {"test_param": True}
op = FakeOperator(default_args=default_args)
assert op.test_param
default_args = {"test_param": True, "test_sub_param": True}
op = FakeSubClass(default_args=default_args)
assert op.test_param
assert op.test_sub_param
default_args = {"test_param": True}
op = FakeSubClass(default_args=default_args, test_sub_param=True)
assert op.test_param
assert op.test_sub_param
with pytest.raises(TypeError, match="missing keyword argument 'test_sub_param'"):
FakeSubClass(default_args=default_args)
def test_execution_timeout_type(self):
with pytest.raises(
ValueError, match="execution_timeout must be timedelta object but passed as type: <class 'str'>"
):
BaseOperator(task_id="test", execution_timeout="1")
with pytest.raises(
ValueError, match="execution_timeout must be timedelta object but passed as type: <class 'int'>"
):
BaseOperator(task_id="test", execution_timeout=1)
def test_default_resources(self):
task = BaseOperator(task_id="default-resources")
assert task.resources is None
def test_custom_resources(self):
task = BaseOperator(task_id="custom-resources", resources={"cpus": 1, "ram": 1024})
assert task.resources.cpus.qty == 1
assert task.resources.ram.qty == 1024
def test_default_email_on_actions(self):
test_task = BaseOperator(task_id="test_default_email_on_actions")
assert test_task.email_on_retry is True
assert test_task.email_on_failure is True
def test_email_on_actions(self):
test_task = BaseOperator(
task_id="test_default_email_on_actions", email_on_retry=False, email_on_failure=True
)
assert test_task.email_on_retry is False
assert test_task.email_on_failure is True
def test_incorrect_default_args(self):
default_args = {"test_param": True, "extra_param": True}
op = FakeOperator(default_args=default_args)
assert op.test_param
default_args = {"random_params": True}
with pytest.raises(TypeError, match="missing keyword argument 'test_param'"):
FakeOperator(default_args=default_args)
def test_incorrect_priority_weight(self):
error_msg = "'priority_weight' for task 'test_op' expects <class 'int'>, got <class 'str'>"
with pytest.raises(TypeError, match=error_msg):
BaseOperator(task_id="test_op", priority_weight="2")
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
msg = r"Invalid arguments were passed to BaseOperator \(task_id: test_illegal_args\)"
with pytest.raises(TypeError, match=msg):
BaseOperator(
task_id="test_illegal_args",
illegal_argument_1234="hello?",
)
@mock.patch("airflow.sdk.bases.operator.redact")
def test_illegal_args_with_secrets(self, mock_redact):
"""
Tests that operators on illegal arguments with secrets are correctly masked.
"""
secret = "secretP4ssw0rd!"
mock_redact.side_effect = ["***"]
msg = r"Invalid arguments were passed to BaseOperator"
with pytest.raises(TypeError, match=msg) as exc_info:
BaseOperator(
task_id="test_illegal_args",
secret_argument=secret,
)
assert "***" in str(exc_info.value)
assert secret not in str(exc_info.value)
def test_invalid_type_for_default_arg(self):
error_msg = "'max_active_tis_per_dag' for task 'test' expects <class 'int'>, got <class 'str'> with value 'not_an_int'"
with pytest.raises(TypeError, match=error_msg):
BaseOperator(task_id="test", default_args={"max_active_tis_per_dag": "not_an_int"})
def test_invalid_type_for_operator_arg(self):
error_msg = "'max_active_tis_per_dag' for task 'test' expects <class 'int'>, got <class 'str'> with value 'not_an_int'"
with pytest.raises(TypeError, match=error_msg):
BaseOperator(task_id="test", max_active_tis_per_dag="not_an_int")
def test_weight_rule_default(self):
op = BaseOperator(task_id="test_task")
assert _DownstreamPriorityWeightStrategy() == op.weight_rule
def test_weight_rule_override(self):
op = BaseOperator(task_id="test_task", weight_rule="upstream")
assert _UpstreamPriorityWeightStrategy() == op.weight_rule
def test_dag_task_invalid_weight_rule(self):
# Test if we enter an invalid weight rule
with pytest.raises(ValueError, match="Unknown priority strategy"):
BaseOperator(task_id="should_fail", weight_rule="no rule")
def test_dag_task_not_registered_weight_strategy(self):
from airflow.task.priority_strategy import PriorityWeightStrategy
class NotRegisteredPriorityWeightStrategy(PriorityWeightStrategy):
def get_weight(self, ti):
return 99
with pytest.raises(ValueError, match="Unknown priority strategy"):
BaseOperator(
task_id="empty_task",
weight_rule=NotRegisteredPriorityWeightStrategy(),
)
def test_db_safe_priority(self):
"""Test the db_safe_priority function."""
from airflow.sdk.bases.operator import DB_SAFE_MAXIMUM, DB_SAFE_MINIMUM, db_safe_priority
assert db_safe_priority(1) == 1
assert db_safe_priority(-1) == -1
assert db_safe_priority(9999999999) == DB_SAFE_MAXIMUM
assert db_safe_priority(-9999999999) == DB_SAFE_MINIMUM
def test_db_safe_constants(self):
"""Test the database safe constants."""
from airflow.sdk.bases.operator import DB_SAFE_MAXIMUM, DB_SAFE_MINIMUM
assert DB_SAFE_MINIMUM == -2147483648
assert DB_SAFE_MAXIMUM == 2147483647
def test_warnings_are_properly_propagated(self):
with pytest.warns(DeprecationWarning, match="deprecated") as warnings:
DeprecatedOperator(task_id="test")
assert len(warnings) == 1
warning = warnings[0]
# Here we check that the trace points to the place
# where the deprecated class was used
assert warning.filename == __file__
def test_setattr_performs_no_custom_action_at_execute_time(self, spy_agency):
op = MockOperator(task_id="test_task")
op_copy = op.prepare_for_execution()
spy_agency.spy_on(op._set_xcomargs_dependency, call_original=False)
op_copy.arg1 = "b"
assert op._set_xcomargs_dependency.called is False
def test_upstream_is_set_when_template_field_is_xcomarg(self):
with DAG("xcomargs_test", schedule=None):
op1 = BaseOperator(task_id="op1")
op2 = MockOperator(task_id="op2", arg1=op1.output)
assert op1.task_id in op2.upstream_task_ids
assert op2.task_id in op1.downstream_task_ids
def test_cross_downstream(self):
"""Test if all dependencies between tasks are all set correctly."""
dag = DAG(dag_id="test_dag", schedule=None, start_date=datetime.now())
start_tasks = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 4)]
end_tasks = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(4, 7)]
cross_downstream(from_tasks=start_tasks, to_tasks=end_tasks)
for start_task in start_tasks:
assert set(start_task.get_direct_relatives(upstream=False)) == set(end_tasks)
# Begin test for `XComArgs`
xstart_tasks = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 4)
]
xend_tasks = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(4, 7)
]
cross_downstream(from_tasks=xstart_tasks, to_tasks=xend_tasks)
for xstart_task in xstart_tasks:
assert set(xstart_task.operator.get_direct_relatives(upstream=False)) == {
xend_task.operator for xend_task in xend_tasks
}
def test_chain(self):
dag = DAG(dag_id="test_chain", schedule=None, start_date=datetime.now())
# Begin test for classic operators with `EdgeModifiers`
[label1, label2] = [Label(label=f"label{i}") for i in range(1, 3)]
[op1, op2, op3, op4, op5, op6] = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 7)]
chain(op1, [label1, label2], [op2, op3], [op4, op5], op6)
assert {op2, op3} == set(op1.get_direct_relatives(upstream=False))
assert [op4] == op2.get_direct_relatives(upstream=False)
assert [op5] == op3.get_direct_relatives(upstream=False)
assert {op4, op5} == set(op6.get_direct_relatives(upstream=True))
assert dag.get_edge_info(upstream_task_id=op1.task_id, downstream_task_id=op2.task_id) == {
"label": "label1"
}
assert dag.get_edge_info(upstream_task_id=op1.task_id, downstream_task_id=op3.task_id) == {
"label": "label2"
}
# Begin test for `XComArgs` with `EdgeModifiers`
[xlabel1, xlabel2] = [Label(label=f"xcomarg_label{i}") for i in range(1, 3)]
[xop1, xop2, xop3, xop4, xop5, xop6] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 7)
]
chain(xop1, [xlabel1, xlabel2], [xop2, xop3], [xop4, xop5], xop6)
assert {xop2.operator, xop3.operator} == set(xop1.operator.get_direct_relatives(upstream=False))
assert [xop4.operator] == xop2.operator.get_direct_relatives(upstream=False)
assert [xop5.operator] == xop3.operator.get_direct_relatives(upstream=False)
assert {xop4.operator, xop5.operator} == set(xop6.operator.get_direct_relatives(upstream=True))
assert dag.get_edge_info(
upstream_task_id=xop1.operator.task_id, downstream_task_id=xop2.operator.task_id
) == {"label": "xcomarg_label1"}
assert dag.get_edge_info(
upstream_task_id=xop1.operator.task_id, downstream_task_id=xop3.operator.task_id
) == {"label": "xcomarg_label2"}
# Begin test for `TaskGroups`
[tg1, tg2] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 3)]
[op1, op2] = [BaseOperator(task_id=f"task{i}", dag=dag) for i in range(1, 3)]
[tgop1, tgop2] = [
BaseOperator(task_id=f"task_group_task{i}", task_group=tg1, dag=dag) for i in range(1, 3)
]
[tgop3, tgop4] = [
BaseOperator(task_id=f"task_group_task{i}", task_group=tg2, dag=dag) for i in range(1, 3)
]
chain(op1, tg1, tg2, op2)
assert {tgop1, tgop2} == set(op1.get_direct_relatives(upstream=False))
assert {tgop3, tgop4} == set(tgop1.get_direct_relatives(upstream=False))
assert {tgop3, tgop4} == set(tgop2.get_direct_relatives(upstream=False))
assert [op2] == tgop3.get_direct_relatives(upstream=False)
assert [op2] == tgop4.get_direct_relatives(upstream=False)
def test_chain_linear(self):
dag = DAG(dag_id="test_chain_linear", schedule=None, start_date=datetime.now())
t1, t2, t3, t4, t5, t6, t7 = (BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 8))
chain_linear(t1, [t2, t3, t4], [t5, t6], t7)
assert set(t1.get_direct_relatives(upstream=False)) == {t2, t3, t4}
assert set(t2.get_direct_relatives(upstream=False)) == {t5, t6}
assert set(t3.get_direct_relatives(upstream=False)) == {t5, t6}
assert set(t7.get_direct_relatives(upstream=True)) == {t5, t6}
t1, t2, t3, t4, t5, t6 = (
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 7)
)
chain_linear(t1, [t2, t3], [t4, t5], t6)
assert set(t1.operator.get_direct_relatives(upstream=False)) == {t2.operator, t3.operator}
assert set(t2.operator.get_direct_relatives(upstream=False)) == {t4.operator, t5.operator}
assert set(t3.operator.get_direct_relatives(upstream=False)) == {t4.operator, t5.operator}
assert set(t6.operator.get_direct_relatives(upstream=True)) == {t4.operator, t5.operator}
# Begin test for `TaskGroups`
tg1, tg2 = (TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 3))
op1, op2 = (BaseOperator(task_id=f"task{i}", dag=dag) for i in range(1, 3))
tgop1, tgop2 = (
BaseOperator(task_id=f"task_group_task{i}", task_group=tg1, dag=dag) for i in range(1, 3)
)
tgop3, tgop4 = (
BaseOperator(task_id=f"task_group_task{i}", task_group=tg2, dag=dag) for i in range(1, 3)
)
chain_linear(op1, tg1, tg2, op2)
assert set(op1.get_direct_relatives(upstream=False)) == {tgop1, tgop2}
assert set(tgop1.get_direct_relatives(upstream=False)) == {tgop3, tgop4}
assert set(tgop2.get_direct_relatives(upstream=False)) == {tgop3, tgop4}
assert set(tgop3.get_direct_relatives(upstream=False)) == {op2}
assert set(tgop4.get_direct_relatives(upstream=False)) == {op2}
t1, t2 = (BaseOperator(task_id=f"t-{i}", dag=dag) for i in range(1, 3))
with pytest.raises(ValueError, match="Labels are not supported"):
chain_linear(t1, Label("hi"), t2)
with pytest.raises(ValueError, match="nothing to do"):
chain_linear()
with pytest.raises(ValueError, match="Did you forget to expand"):
chain_linear(t1)
def test_chain_not_support_type(self):
dag = DAG(dag_id="test_chain", schedule=None, start_date=datetime.now())
[op1, op2] = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 3)]
with pytest.raises(TypeError):
chain([op1, op2], 1)
# Begin test for `XComArgs`
[xop1, xop2] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 3)
]
with pytest.raises(TypeError):
chain([xop1, xop2], 1)
# Begin test for `EdgeModifiers`
with pytest.raises(TypeError):
chain([Label("labe1"), Label("label2")], 1)
# Begin test for `TaskGroups`
[tg1, tg2] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 3)]
with pytest.raises(TypeError):
chain([tg1, tg2], 1)
def test_chain_different_length_iterable(self):
dag = DAG(dag_id="test_chain", schedule=None, start_date=datetime.now())
[label1, label2] = [Label(label=f"label{i}") for i in range(1, 3)]
[op1, op2, op3, op4, op5] = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 6)]
CHAIN_NOT_SUPPORTED = "Chain not supported for different length Iterable. Got {} and {}."
with pytest.raises(ValueError, match=CHAIN_NOT_SUPPORTED.format(2, 3)):
chain([op1, op2], [op3, op4, op5])
with pytest.raises(ValueError, match=CHAIN_NOT_SUPPORTED.format(3, 2)):
chain([op1, op2, op3], [label1, label2])
# Begin test for `XComArgs` with `EdgeModifiers`
[label3, label4] = [Label(label=f"xcomarg_label{i}") for i in range(1, 3)]
[xop1, xop2, xop3, xop4, xop5] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 6)
]
with pytest.raises(ValueError, match=CHAIN_NOT_SUPPORTED.format(2, 3)):
chain([xop1, xop2], [xop3, xop4, xop5])
with pytest.raises(ValueError, match=CHAIN_NOT_SUPPORTED.format(3, 2)):
chain([xop1, xop2, xop3], [label1, label2])
# Begin test for `TaskGroups`
[tg1, tg2, tg3, tg4, tg5] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 6)]
with pytest.raises(ValueError, match=CHAIN_NOT_SUPPORTED.format(2, 3)):
chain([tg1, tg2], [tg3, tg4, tg5])
def test_set_xcomargs_dependencies_works_recursively(self):
with DAG("xcomargs_test", schedule=None):
op1 = BaseOperator(task_id="op1")
op2 = BaseOperator(task_id="op2")
op3 = MockOperator(task_id="op3", arg1=[op1.output, op2.output])
op4 = MockOperator(task_id="op4", arg1={"op1": op1.output, "op2": op2.output})
assert op1.task_id in op3.upstream_task_ids
assert op2.task_id in op3.upstream_task_ids
assert op1.task_id in op4.upstream_task_ids
assert op2.task_id in op4.upstream_task_ids
def test_set_xcomargs_dependencies_works_when_set_after_init(self):
with DAG(dag_id="xcomargs_test", schedule=None):
op1 = BaseOperator(task_id="op1")
op2 = MockOperator(task_id="op2")
op2.arg1 = op1.output # value is set after init
assert op1.task_id in op2.upstream_task_ids
def test_set_xcomargs_dependencies_error_when_outside_dag(self):
op1 = BaseOperator(task_id="op1")
with pytest.raises(
ValueError,
match=r"Tried to create relationships between tasks that don't have Dags yet. Set the Dag for at least one task and try again: \[<Task\(MockOperator\): op2>, <Task\(BaseOperator\): op1>\]",
):
MockOperator(task_id="op2", arg1=op1.output)
def test_cannot_change_dag(self):
with DAG(dag_id="dag1", schedule=None):
op1 = BaseOperator(task_id="op1")
with pytest.raises(ValueError, match="can not be changed"):
op1.dag = DAG(dag_id="dag2")
def test_invalid_trigger_rule(self):
with pytest.raises(
ValueError,
match=(r"The trigger_rule must be one of .*,'\.op1'; received 'some_rule'\."),
):
BaseOperator(task_id="op1", trigger_rule="some_rule")
def test_trigger_rule_validation(self):
from airflow.sdk.definitions._internal.abstractoperator import DEFAULT_TRIGGER_RULE
# An operator with default trigger rule and a fail-stop dag should be allowed.
with DAG(
dag_id="test_dag_trigger_rule_validation",
schedule=None,
start_date=DEFAULT_DATE,
fail_fast=True,
):
BaseOperator(task_id="test_valid_trigger_rule", trigger_rule=DEFAULT_TRIGGER_RULE)
# An operator with non default trigger rule and a non fail-stop dag should be allowed.
with DAG(
dag_id="test_dag_trigger_rule_validation",
schedule=None,
start_date=DEFAULT_DATE,
fail_fast=False,
):
BaseOperator(task_id="test_valid_trigger_rule", trigger_rule="always")
@pytest.mark.parametrize(
("content", "context", "expected_output"),
[
("{{ foo }}", {"foo": "bar"}, "bar"),
(["{{ foo }}_1", "{{ foo }}_2"], {"foo": "bar"}, ["bar_1", "bar_2"]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
(
{"key1": "{{ foo }}_1", "key2": "{{ foo }}_2"},
{"foo": "bar"},
{"key1": "bar_1", "key2": "bar_2"},
),
(
{"key_{{ foo }}_1": 1, "key_2": "{{ foo }}_2"},
{"foo": "bar"},
{"key_{{ foo }}_1": 1, "key_2": "bar_2"},
),
(date(2018, 12, 6), {"foo": "bar"}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
({"{{ foo }}_1", "{{ foo }}_2"}, {"foo": "bar"}, {"bar_1", "bar_2"}),
(None, {}, None),
([], {}, []),
({}, {}, {}),
(
# check nested fields can be templated
ClassWithCustomAttributes(att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]),
{"foo": "bar"},
ClassWithCustomAttributes(att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]),
),
(
# check deep nested fields can be templated
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
{"foo": "bar"},
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
),
(
# check null value on nested template field
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
{},
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
),
(
# check there is no RecursionError on circular references
object1,
{"foo": "bar"},
object1,
),
# By default, Jinja2 drops one (single) trailing newline
("{{ foo }}\n\n", {"foo": "bar"}, "bar\n"),
(literal("{{ foo }}"), {"foo": "bar"}, "{{ foo }}"),
(literal(["{{ foo }}_1", "{{ foo }}_2"]), {"foo": "bar"}, ["{{ foo }}_1", "{{ foo }}_2"]),
(literal(("{{ foo }}_1", "{{ foo }}_2")), {"foo": "bar"}, ("{{ foo }}_1", "{{ foo }}_2")),
],
)
def test_render_template(self, content, context, expected_output):
"""Test render_template given various input types."""
task = BaseOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
@pytest.mark.parametrize(
("content", "context", "expected_output"),
[
("{{ foo }}", {"foo": "bar"}, "bar"),
("{{ foo }}", {"foo": ["bar1", "bar2"]}, ["bar1", "bar2"]),
(["{{ foo }}", "{{ foo | length}}"], {"foo": ["bar1", "bar2"]}, [["bar1", "bar2"], 2]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
("{{ ds }}", {"ds": date(2018, 12, 6)}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
("{{ ds }}", {"ds": datetime(2018, 12, 6, 10, 55)}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
(
("{{ foo }}", "{{ foo.isoformat() }}"),
{"foo": datetime(2018, 12, 6, 10, 55)},
(datetime(2018, 12, 6, 10, 55), "2018-12-06T10:55:00"),
),
(None, {}, None),
([], {}, []),
({}, {}, {}),
],
)
def test_render_template_with_native_envs(self, content, context, expected_output):
"""Test render_template given various input types with Native Python types"""
with DAG("test-dag", schedule=None, start_date=DEFAULT_DATE, render_template_as_native_obj=True):
task = BaseOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
def test_render_template_fields(self):
"""Verify if operator attributes are correctly templated."""
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
# Assert nothing is templated yet
assert task.arg1 == "{{ foo }}"
assert task.arg2 == "{{ bar }}"
# Trigger templating and verify if attributes are templated correctly
task.render_template_fields(context={"foo": "footemplated", "bar": "bartemplated"})
assert task.arg1 == "footemplated"
assert task.arg2 == "bartemplated"
def test_render_template_fields_func_using_context(self):
"""Verify if operator attributes are correctly templated."""
def fn_to_template(context, jinja_env):
tmp = context["task"].render_template("{{ bar }}", context, jinja_env)
return "foo_" + tmp
task = MockOperator(task_id="op1", arg2=fn_to_template)
# Trigger templating and verify if attributes are templated correctly
task.render_template_fields(context={"bar": "bartemplated", "task": task})
assert task.arg2 == "foo_bartemplated"
def test_render_template_fields_simple_func(self):
"""Verify if operator attributes are correctly templated."""
def fn_to_template(**kwargs):
a = "foo_" + ("bar" * 3)
return a
task = MockOperator(task_id="op1", arg2=fn_to_template)
task.render_template_fields({})
assert task.arg2 == "foo_barbarbar"
@pytest.mark.parametrize("content", [object(), uuid.uuid4()])
def test_render_template_fields_no_change(self, content):
"""Tests if non-templatable types remain unchanged."""
task = BaseOperator(task_id="op1")
result = task.render_template(content, {"foo": "bar"})
assert content is result
def test_nested_template_fields_declared_must_exist(self):
"""Test render_template when a nested template field is missing."""
task = BaseOperator(task_id="op1")
error_message = (
"'missing_field' is configured as a template field but ClassWithCustomAttributes does not have "
"this attribute."
)
with pytest.raises(AttributeError, match=error_message):
task.render_template(
ClassWithCustomAttributes(
template_fields=["missing_field"], task_type="ClassWithCustomAttributes"
),
{},
)
def test_string_template_field_attr_is_converted_to_list(self):
"""Verify template_fields attribute is converted to a list if declared as a string."""
class StringTemplateFieldsOperator(BaseOperator):
template_fields = "a_string"
warning_message = (
"The `template_fields` value for StringTemplateFieldsOperator is a string but should be a "
"list or tuple of string. Wrapping it in a list for execution. Please update "
"StringTemplateFieldsOperator accordingly."
)
with pytest.warns(UserWarning, match=warning_message) as warnings:
task = StringTemplateFieldsOperator(task_id="op1")
assert len(warnings) == 1
assert isinstance(task.template_fields, list)
def test_jinja_invalid_expression_is_just_propagated(self):
"""Test render_template propagates Jinja invalid expression errors."""
task = BaseOperator(task_id="op1")
with pytest.raises(jinja2.exceptions.TemplateSyntaxError):
task.render_template("{{ invalid expression }}", {})
@mock.patch("airflow.sdk.definitions._internal.templater.SandboxedEnvironment", autospec=True)
def test_jinja_env_creation(self, mock_jinja_env):
"""Verify if a Jinja environment is created only once when templating."""
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
task.render_template_fields(context={"foo": "whatever", "bar": "whatever"})
assert mock_jinja_env.call_count == 1
def test_params_source(self):
# Test bug when copying an operator attached to a Dag
with DAG(
"dag0",
params=ParamsDict(
{
"param from Dag": "value1",
"overwritten by task": "value 2",
}
),
schedule=None,
start_date=DEFAULT_DATE,
):
op1 = MockOperator(
task_id="task1",
params=ParamsDict(
{
"overwritten by task": "value 3",
"param from task": "value 4",
}
),
)
for key, expected_source in (
("param from Dag", "dag"),
("overwritten by task", "task"),
("param from task", "task"),
):
assert op1.params.get_param(key).source == expected_source
def test_deepcopy(self):
# Test bug when copying an operator attached to a Dag
with DAG("dag0", schedule=None, start_date=DEFAULT_DATE) as dag:
@dag.task
def task0():
pass
MockOperator(task_id="task1", arg1=task0())
copy.deepcopy(dag)
def test_mro(self):
from airflow.providers.common.sql.operators import sql
class Mixin(sql.BaseSQLOperator):
pass
class Branch(Mixin, sql.BranchSQLOperator):
pass
# The following throws an exception if metaclass breaks MRO:
# airflow.sdk.exceptions.AirflowException: Invalid arguments were passed to Branch (task_id: test). Invalid arguments were:
# **kwargs: {'sql': 'sql', 'follow_task_ids_if_true': ['x'], 'follow_task_ids_if_false': ['y']}
op = Branch(
task_id="test",
conn_id="abc",
sql="sql",
follow_task_ids_if_true=["x"],
follow_task_ids_if_false=["y"],
)
assert isinstance(op, Branch)
def test_init_subclass_args():
class InitSubclassOp(BaseOperator):
class_arg = None
def __init_subclass__(cls, class_arg=None, **kwargs) -> None:
cls.class_arg = class_arg
super().__init_subclass__()
class_arg = "foo"
class ConcreteSubclassOp(InitSubclassOp, class_arg=class_arg):
pass
task = ConcreteSubclassOp(task_id="op1")
assert task.class_arg == class_arg
| TestBaseOperator |
python | django__django | tests/generic_views/views.py | {
"start": 2016,
"end": 2300
} | class ____(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super().__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page,
)
| CustomPaginator |
python | allegroai__clearml | clearml/backend_api/services/v2_13/projects.py | {
"start": 37375,
"end": 42758
} | class ____(Response):
"""
Response of projects.delete endpoint.
:param deleted: Number of projects deleted (0 or 1)
:type deleted: int
:param disassociated_tasks: Number of tasks disassociated from the deleted
project
:type disassociated_tasks: int
:param urls: The urls of the files that were uploaded by the project tasks and
models. Returned if the 'delete_contents' was set to 'true'
:type urls: Urls
:param deleted_models: Number of models deleted
:type deleted_models: int
:param deleted_tasks: Number of tasks deleted
:type deleted_tasks: int
"""
_service = "projects"
_action = "delete"
_version = "2.13"
_schema = {
"definitions": {
"urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"properties": {
"deleted": {
"description": "Number of projects deleted (0 or 1)",
"type": ["integer", "null"],
},
"deleted_models": {
"description": "Number of models deleted",
"type": ["integer", "null"],
},
"deleted_tasks": {
"description": "Number of tasks deleted",
"type": ["integer", "null"],
},
"disassociated_tasks": {
"description": "Number of tasks disassociated from the deleted project",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by the project tasks and models. Returned if the 'delete_contents' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/urls"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
deleted: Optional[int] = None,
disassociated_tasks: Optional[int] = None,
urls: Any = None,
deleted_models: Optional[int] = None,
deleted_tasks: Optional[int] = None,
**kwargs: Any
) -> None:
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.disassociated_tasks = disassociated_tasks
self.urls = urls
self.deleted_models = deleted_models
self.deleted_tasks = deleted_tasks
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("disassociated_tasks")
def disassociated_tasks(self) -> Optional[int]:
return self._property_disassociated_tasks
@disassociated_tasks.setter
def disassociated_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_disassociated_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "disassociated_tasks", six.integer_types)
self._property_disassociated_tasks = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = Urls.from_dict(value)
else:
self.assert_isinstance(value, "urls", Urls)
self._property_urls = value
@schema_property("deleted_models")
def deleted_models(self) -> Optional[int]:
return self._property_deleted_models
@deleted_models.setter
def deleted_models(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_models", six.integer_types)
self._property_deleted_models = value
@schema_property("deleted_tasks")
def deleted_tasks(self) -> Optional[int]:
return self._property_deleted_tasks
@deleted_tasks.setter
def deleted_tasks(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_tasks = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_tasks", six.integer_types)
self._property_deleted_tasks = value
| DeleteResponse |
python | ray-project__ray | doc/source/serve/doc_code/getting_started/translator.py | {
"start": 250,
"end": 666
} | class ____:
def __init__(self):
# Load model
self.model = pipeline("translation_en_to_fr", model="t5-small")
def translate(self, text: str) -> str:
# Run inference
model_output = self.model(text)
# Post-process output to return only the translation text
translation = model_output[0]["translation_text"]
return translation
@serve.deployment
| Translator |
python | huggingface__transformers | tests/models/smolvlm/test_modeling_smolvlm.py | {
"start": 23123,
"end": 31901
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-256M-Video-Instruct")
self.image1 = Image.open(
BytesIO(
requests.get(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
).content
)
)
self.video_messages = [
{
"role": "user",
"content": [
{
"type": "video",
"path": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov",
},
{"type": "text", "text": "Describe this video in detail"},
],
},
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_integration_test(self):
model = SmolVLMForConditionalGeneration.from_pretrained(
"HuggingFaceTB/SmolVLM2-256M-Video-Instruct",
dtype=torch.bfloat16,
device_map="auto",
)
# Create inputs
text = "<image>In this image, we see"
images = self.image1
inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True)
inputs.to(device=torch_device, dtype=torch.bfloat16)
generated_ids = model.generate(**inputs, max_new_tokens=9)
generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
expected_generated_text = "\n\n\n\nIn this image, we see a view of the Statue of Liberty and the"
self.assertEqual(generated_texts[0], expected_generated_text)
@slow
def test_integration_test_video(self):
model = SmolVLMForConditionalGeneration.from_pretrained(
"HuggingFaceTB/SmolVLM2-256M-Video-Instruct",
dtype=torch.bfloat16,
device_map="auto",
)
# Create inputs
inputs = self.processor.apply_chat_template(
self.video_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(device=torch_device, dtype=torch.bfloat16)
generated_ids = model.generate(**inputs, max_new_tokens=20)
generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True)
expected_generated_text = Expectations(
{
(None, None): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video depicts a large language model architecture, specifically a language model with a "quick brown" feature',
("cuda", (8, 0)): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model architecture, specifically a "Quick Brown" model, which is designed',
("cuda", (8, 6)): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model, specifically a neural network model, which is designed to learn and',
("rocm", (9, 4)): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video depicts a large language model architecture, specifically a language model with a "quick brown" feature',
("rocm", None): 'User: You are provided the following series of nine frames from a 0:00:09 [H:MM:SS] video.\n\nFrame from 00:00:\nFrame from 00:01:\nFrame from 00:02:\nFrame from 00:03:\nFrame from 00:04:\nFrame from 00:05:\nFrame from 00:06:\nFrame from 00:08:\nFrame from 00:09:\n\nDescribe this video in detail\nAssistant: The video showcases a large language model architecture, specifically a "Quick Brown" model, which is designed',
}
).get_expectation() # fmt: skip
self.assertEqual(generated_texts[0], expected_generated_text)
@slow
def test_export_smolvlm_vision_encoder(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
# For ExecuTorch, flash attention is not supported, so the way of exporting vison encoder should be compatible with text-decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
# Load model and extract vision encoder
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_vision_encoder()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
@slow
def test_export_smolvlm_connector(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
# For ExecuTorch, flash attention is not supported, so the way of exporting vison encoder should be compatible with text-decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
# Load the model and extract the connector (multi-modal projector)
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
connector = model.model.connector
connector.eval()
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_connector()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
@slow
def test_export_smolvlm_text_decoder(self):
from transformers import AutoConfig
from transformers.integrations.executorch import TorchExportableModuleForVLM
model_id = "HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
# NOTE: The attention_mask is prepared internally in the vision encoder, depending on whether flash attention is used or not
# For ExecuTorch, flash attention is not supported, so the way of exporting vison encoder should be compatible with text-decoder
config = AutoConfig.from_pretrained(model_id)
config.text_config._flash_attn_2_enabled = False
config.text_config.use_cache = True
config.text_config.attn_implementation = "sdpa"
generation_config = GenerationConfig(
use_cache=True,
cache_implementation="static",
max_length=1234,
cache_config={
"batch_size": 1,
"max_cache_len": 1234,
},
)
# Load the model and extract the text decoder
model = SmolVLMForConditionalGeneration.from_pretrained(
model_id,
dtype=torch.float32,
config=config,
)
model.model.text_model.generation_config = generation_config
text_decoder = model.model.text_model
text_decoder.eval()
exportable_module = TorchExportableModuleForVLM(model)
exported_program = exportable_module.export_text_decoder()
self.assertIsInstance(exported_program, torch.export.ExportedProgram)
| SmolVLMForConditionalGenerationIntegrationTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.