language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 62440,
"end": 65155
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_correct_type(self):
integers = constant_op.constant([1, 2], dtype=dtypes.int64)
with ops.control_dependencies([
check_ops.assert_type(integers, dtypes.int64)]):
out = array_ops.identity(integers)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_sparsetensor_doesnt_raise_when_correct_type(self):
sparse_float = sparse_tensor.SparseTensor(
constant_op.constant([[111], [232]], dtypes.int64),
constant_op.constant([23.4, -43.2], dtypes.float32),
constant_op.constant([500], dtypes.int64))
with ops.control_dependencies(
[check_ops.assert_type(sparse_float, dtypes.float32)]):
out = sparse_tensor.SparseTensor(sparse_float.indices,
array_ops.identity(sparse_float.values),
sparse_float.dense_shape)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raggedtensor_doesnt_raise_when_correct_type(self):
x = ragged_factory_ops.constant([[1., 2.], [3.]])
with ops.control_dependencies(
[check_ops.assert_type(x, dtypes.float32)]):
y = array_ops.identity(x)
self.assertAllEqual(x, y)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_wrong_type(self):
floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
with self.assertRaisesRegex(TypeError, "must be of type tf.float32; "
"got tf.float16"):
check_ops.assert_type(floats, dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def test_sparsetensor_raises_when_wrong_type(self):
sparse_float16 = sparse_tensor.SparseTensor(
constant_op.constant([[111], [232]], dtypes.int64),
constant_op.constant([23.4, -43.2], dtypes.float16),
constant_op.constant([500], dtypes.int64))
with self.assertRaisesRegex(TypeError, "must be of type.*float32"):
check_ops.assert_type(sparse_float16, dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def test_raggedtensor_raises_when_wrong_type(self):
x = ragged_factory_ops.constant([[1, 2], [3]])
with self.assertRaisesRegex(TypeError, "must be of type.*float32"):
check_ops.assert_type(x, dtypes.float32)
def test_raise_when_tf_type_is_not_dtype(self):
# Test case for GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/45975
value = constant_op.constant(0.0)
with self.assertRaisesRegex(TypeError,
"Cannot convert.*to a TensorFlow DType"):
check_ops.assert_type(value, (dtypes.float32,))
| AssertTypeTest |
python | numba__numba | numba/tests/test_asnumbatype.py | {
"start": 691,
"end": 10661
} | class ____(TestCase):
int_nb_type = typeof(0)
float_nb_type = typeof(0.0)
complex_nb_type = typeof(complex(0))
str_nb_type = typeof("numba")
bool_nb_type = typeof(True)
none_nb_type = typeof(None)
def test_simple_types(self):
self.assertEqual(as_numba_type(int), self.int_nb_type)
self.assertEqual(as_numba_type(float), self.float_nb_type)
self.assertEqual(as_numba_type(complex), self.complex_nb_type)
self.assertEqual(as_numba_type(str), self.str_nb_type)
self.assertEqual(as_numba_type(bool), self.bool_nb_type)
self.assertEqual(as_numba_type(type(None)), self.none_nb_type)
def test_numba_types(self):
numba_types = [
types.intp,
types.boolean,
types.ListType(types.float64),
types.DictType(
types.intp, types.Tuple([types.float32, types.float32])
),
]
for ty in numba_types:
self.assertEqual(as_numba_type(ty), ty)
def test_single_containers(self):
self.assertEqual(
as_numba_type(py_typing.List[float]),
types.ListType(self.float_nb_type),
)
self.assertEqual(
as_numba_type(py_typing.Dict[float, str]),
types.DictType(self.float_nb_type, self.str_nb_type),
)
self.assertEqual(
as_numba_type(py_typing.Set[complex]),
types.Set(self.complex_nb_type),
)
self.assertEqual(
as_numba_type(py_typing.Tuple[float, float]),
types.Tuple([self.float_nb_type, self.float_nb_type]),
)
self.assertEqual(
as_numba_type(py_typing.Tuple[float, complex]),
types.Tuple([self.float_nb_type, self.complex_nb_type]),
)
def test_optional(self):
self.assertEqual(
as_numba_type(py_typing.Optional[float]),
types.Optional(self.float_nb_type),
)
self.assertEqual(
as_numba_type(py_typing.Union[str, None]),
types.Optional(self.str_nb_type),
)
self.assertEqual(
as_numba_type(py_typing.Union[None, bool]),
types.Optional(self.bool_nb_type),
)
# Optional[x] is a special case of Union[x, None]. We raise a
# TypingError if the right type is not NoneType.
with self.assertRaises(TypingError) as raises:
as_numba_type(py_typing.Union[int, float])
self.assertIn("Cannot type Union that is not an Optional",
str(raises.exception))
def test_nested_containers(self):
IntList = py_typing.List[int]
self.assertEqual(
as_numba_type(py_typing.List[IntList]),
types.ListType(types.ListType(self.int_nb_type)),
)
self.assertEqual(
as_numba_type(py_typing.List[py_typing.Dict[float, bool]]),
types.ListType(
types.DictType(self.float_nb_type, self.bool_nb_type)
),
)
self.assertEqual(
as_numba_type(
py_typing.Set[py_typing.Tuple[py_typing.Optional[int], float]]),
types.Set(types.Tuple(
[types.Optional(self.int_nb_type), self.float_nb_type])),
)
def test_jitclass_registers(self):
@jitclass
class MyInt:
x: int
def __init__(self, value):
self.x = value
self.assertEqual(as_numba_type(MyInt), MyInt.class_type.instance_type)
def test_type_alias(self):
Pair = py_typing.Tuple[int, int]
ListOfPairs = py_typing.List[Pair]
pair_nb_type = types.Tuple((self.int_nb_type, self.int_nb_type))
self.assertEqual(as_numba_type(Pair), pair_nb_type)
self.assertEqual(
as_numba_type(ListOfPairs), types.ListType(pair_nb_type)
)
def test_overwrite_type(self):
as_numba_type = AsNumbaTypeRegistry()
self.assertEqual(as_numba_type(float), self.float_nb_type)
as_numba_type.register(float, types.float32)
self.assertEqual(as_numba_type(float), types.float32)
self.assertNotEqual(as_numba_type(float), self.float_nb_type)
def test_any_throws(self):
Any = py_typing.Any
any_types = [
py_typing.Optional[Any],
py_typing.List[Any],
py_typing.Set[Any],
py_typing.Dict[float, Any],
py_typing.Dict[Any, float],
py_typing.Tuple[int, Any],
]
for bad_py_type in any_types:
with self.assertRaises(TypingError) as raises:
as_numba_type(bad_py_type)
self.assertIn(
"Cannot infer Numba type of Python type",
str(raises.exception),
)
def test_bad_union_throws(self):
bad_unions = [
py_typing.Union[str, int],
py_typing.Union[int, type(None), py_typing.Tuple[bool, bool]],
]
for bad_py_type in bad_unions:
with self.assertRaises(TypingError) as raises:
as_numba_type(bad_py_type)
self.assertIn("Cannot type Union", str(raises.exception))
def test_instance_check_usecase(self):
# Demonstrates that registering the type class with as_numba_type
# supports instance checks, at least for those subclasses supported by
# the instance check (e.g. types.Number, etc.).
#
# To set up the test we need quite a lot of extension code to support
# a new type independent of the existing types.
# The Python class
class bfloat16:
def __init__(self, value):
self._value = value
# The Numba type class - we use a Number subclass both because it makes
# sense for a new numeric type, and it's one of the types supported by
# instance checks in Numba
class _type_class_bfloat16(Number):
def __init__(self):
self.bitwidth = 16
super().__init__(name="bfloat16")
# The Numba type instance
bfloat16_type = _type_class_bfloat16()
# Register typing of the Python class for use as arguments and
# constants
@typeof_impl.register(bfloat16)
def typeof_bfloat16(val, c):
return bfloat16_type
# A data model for the bfloat16 class. We don't need much actual
# implementation so it doesn't matter too much what this is; a 16-bit
# integer representation is sufficient.
@register_model(_type_class_bfloat16)
class _model_bfloat16(PrimitiveModel):
def __init__(self, dmm, fe_type):
be_type = ir.IntType(fe_type.bitwidth)
super(_model_bfloat16, self).__init__(dmm, fe_type, be_type)
# Ideally we pass in a value so we ensure that the instance check is
# working with values dynamically passed in (preventing the whole check
# being potentially optimized into a simple True or False). For this we
# need an unboxing.
@unbox(_type_class_bfloat16)
def unbox_bfloat16(ty, obj, c):
ll_type = c.context.get_argument_type(ty)
val = cgutils.alloca_once(c.builder, ll_type)
is_error_ptr = cgutils.alloca_once_value(c.builder,
cgutils.false_bit)
with ExitStack() as stack:
value_obj = c.pyapi.object_getattr_string(obj, "_value")
with cgutils.early_exit_if_null(c.builder, stack, value_obj):
c.builder.store(cgutils.true_bit, is_error_ptr)
value_native = c.unbox(types.uint16, value_obj)
c.pyapi.decref(value_obj)
with cgutils.early_exit_if(c.builder, stack,
value_native.is_error):
c.builder.store(cgutils.true_bit, is_error_ptr)
c.builder.store(value_native.value, val)
return NativeValue(c.builder.load(val),
is_error=c.builder.load(is_error_ptr))
# We never call bfloat16 to construct one inside a jitted function, but
# we need this typing so that the type of the bfloat16 class can be
# determined (it's the argument to the instance check).
@type_callable(bfloat16)
def type_bfloat16_ctor(context):
# Note that the typer is never called in this test, because we
# don't call bfloat16 - only the typing of it as a callable is
# used.
def typer(value):
if isinstance(value, types.Integer):
return bfloat16_type
# First we try the instance check without an as_numba_type
# registration, to prove that it is necessary for instance checks to
# work.
@jit
def instancecheck_no_ant_reg(x):
return isinstance(x, bfloat16)
# A "random" value to test with
x_bf16 = bfloat16(0x4049) # bfloat16(3.14)
# Ensure the typing fails without the registration
expected_message = r"Cannot infer Numba type of Python type.*bfloat16"
with self.assertRaisesRegex(TypingError, expected_message):
instancecheck_no_ant_reg(x_bf16)
# Register the typing with as_numba_type so that we can expect instance
# checks to work
as_numba_type.register(bfloat16, bfloat16_type)
# We define a new function to ensure all registrations are as-required
@jit
def instancecheck(x):
return isinstance(x, bfloat16)
# The instance check should be True for bfloat16 instances and False
# otherwise.
self.assertTrue(instancecheck(x_bf16))
self.assertFalse(instancecheck(1))
if __name__ == '__main__':
unittest.main()
| TestAsNumbaType |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_cross_op_test.py | {
"start": 1216,
"end": 2869
} | class ____(test.TestCase):
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (
dtypes.string
if not values or isinstance(values[0], str) else dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices, sp2.indices)
self.assertAllEqual(sp1.values, sp2.values)
self.assertAllEqual(sp1.dense_shape, sp2.dense_shape)
def _assert_sparse_tensor_empty(self, sp):
self.assertEqual(0, sp.indices.size)
self.assertEqual(0, sp.values.size)
# TODO(zakaria): check if we can ignore the first dim of the shape.
self.assertEqual(0, sp.dense_shape[1])
| BaseSparseCrossOpTest |
python | spyder-ide__spyder | spyder/plugins/run/models.py | {
"start": 5850,
"end": 9965
} | class ____(QAbstractListModel):
def __init__(self, parent, executor_model):
super().__init__(parent)
self.parent = parent
self.selected_metadata: Optional[RunConfigurationMetadata] = None
self.current_configuration: Optional[str] = None
self.metadata_index: Dict[int, str] = {}
self.inverted_index: Dict[str, int] = {}
self.run_configurations: OrderedDict[
str, RunConfigurationMetadata] = OrderedDict()
self.executor_model: RunExecutorListModel = executor_model
def get_metadata_context_extension(self, uuid: str):
run_conf = self.run_configurations[uuid]
return run_conf['context'], run_conf['input_extension']
def set_current_run_configuration(self, uuid: str):
self.current_configuration = uuid
def get_current_run_configuration(self):
return self.current_configuration
def get_initial_index(self) -> int:
return self.inverted_index[self.current_configuration]
def get_selected_metadata(self) -> Optional[RunConfigurationMetadata]:
return self.selected_metadata
def get_metadata(self, index: int) -> RunConfigurationMetadata:
uuid = self.metadata_index[index]
metadata = self.run_configurations[uuid]
return metadata
def update_index(self, index: int):
uuid = self.metadata_index[index]
metadata = self.run_configurations[uuid]
context_name = metadata['context']['name']
context_id = getattr(RunContext, context_name)
ext = metadata['input_extension']
self.selected_metadata = metadata
self.executor_model.switch_input(uuid, (ext, context_id))
def data(self, index: QModelIndex, role: int = Qt.DisplayRole):
if role == Qt.DisplayRole or role == Qt.EditRole:
uuid = self.metadata_index[index.row()]
metadata = self.run_configurations[uuid]
return metadata['name']
def rowCount(self, parent: QModelIndex = None) -> int:
return len(self.run_configurations)
def get_run_configuration_parameters(
self,
uuid: str,
executor: str
) -> Optional[StoredRunExecutorParameters]:
context, ext = self.get_metadata_context_extension(uuid)
context_name = context['name']
context_id = getattr(RunContext, context_name)
return self.parent.get_executor_configuration_parameters(
executor, ext, context_id)
def get_last_used_execution_params(
self,
uuid: str,
executor: str
) -> Optional[str]:
return self.parent.get_last_used_execution_params(uuid, executor)
def get_last_used_executor_parameters(
self,
uuid: str
) -> StoredRunConfigurationExecutor:
return self.parent.get_last_used_executor_parameters(uuid)
def pop(self, uuid: str) -> RunConfigurationMetadata:
item = self.run_configurations.pop(uuid)
self.metadata_index = dict(enumerate(self.run_configurations))
self.inverted_index = {v: k for k, v in self.metadata_index.items()}
if self.current_configuration not in self.inverted_index:
self.current_configuration = None
self.dataChanged.emit(self.createIndex(0, 0),
self.createIndex(len(self.metadata_index), 0))
return item
def __iter__(self):
return iter(self.run_configurations)
def __len__(self):
return len(self.run_configurations)
def __getitem__(self, uuid: str) -> RunConfigurationMetadata:
return self.run_configurations[uuid]
def __setitem__(self, uuid: str, metadata: RunConfigurationMetadata):
self.run_configurations[uuid] = metadata
self.metadata_index[len(self.metadata_index)] = uuid
self.inverted_index[uuid] = len(self.inverted_index)
self.dataChanged.emit(self.createIndex(0, 0),
self.createIndex(len(self.metadata_index), 0))
def __contains__(self, uuid: str):
return uuid in self.run_configurations
| RunConfigurationListModel |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 21607,
"end": 23817
} | class ____(SpyderConfigurationObserver):
"""
Extension to the :class:`spyder.api.config.mixins.SpyderConfigurationObserver`
mixin implementation to consider a nested provider configuration.
"""
def _gather_observers(self):
"""Gather all the methods decorated with `on_conf_change`."""
for method_name in dir(self):
# Avoid crash at startup due to MRO
if PYSIDE6 and method_name in {
# Method is debounced
"interpreter_changed"
}:
continue
method = getattr(self, method_name, None)
if hasattr(method, '_conf_listen'):
info = method._conf_listen
if len(info) > 1:
self._multi_option_listeners |= {method_name}
for section, option in info:
if section is None:
section = 'completions'
if option == '__section':
option = (
'provider_configuration',
self.COMPLETION_PROVIDER_NAME,
'values'
)
else:
option = self._wrap_provider_option(option)
section_listeners = self._configuration_listeners.get(
section, {})
option_listeners = section_listeners.get(option, [])
option_listeners.append(method_name)
section_listeners[option] = option_listeners
self._configuration_listeners[section] = section_listeners
def _wrap_provider_option(self, option):
if isinstance(option, tuple):
option = (
'provider_configuration',
self.COMPLETION_PROVIDER_NAME,
'values',
*option
)
else:
option = (
'provider_configuration',
self.COMPLETION_PROVIDER_NAME,
'values',
option
)
return option
| CompletionConfigurationObserver |
python | openai__openai-python | src/openai/types/responses/response_function_shell_tool_call_output.py | {
"start": 1071,
"end": 1749
} | class ____(BaseModel):
id: str
"""The unique ID of the shell call output.
Populated when this item is returned via API.
"""
call_id: str
"""The unique ID of the shell tool call generated by the model."""
max_output_length: Optional[int] = None
"""The maximum length of the shell command output.
This is generated by the model and should be passed back with the raw output.
"""
output: List[Output]
"""An array of shell call output contents"""
type: Literal["shell_call_output"]
"""The type of the shell call output. Always `shell_call_output`."""
created_by: Optional[str] = None
| ResponseFunctionShellToolCallOutput |
python | openai__openai-python | src/openai/types/conversations/summary_text_content.py | {
"start": 195,
"end": 405
} | class ____(BaseModel):
text: str
"""A summary of the reasoning output from the model so far."""
type: Literal["summary_text"]
"""The type of the object. Always `summary_text`."""
| SummaryTextContent |
python | django-extensions__django-extensions | django_extensions/management/commands/runscript.py | {
"start": 935,
"end": 12951
} | class ____(EmailNotificationCommand):
help = "Runs a script in django context."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.current_directory = os.getcwd()
self.last_exit_code = 0
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("script", nargs="+")
parser.add_argument(
"--fixtures",
action="store_true",
dest="infixtures",
default=False,
help="Also look in app.fixtures subdir",
)
parser.add_argument(
"--noscripts",
action="store_true",
dest="noscripts",
default=False,
help="Do not look in app.scripts subdir",
)
parser.add_argument(
"-s",
"--silent",
action="store_true",
dest="silent",
default=False,
help="Run silently, do not show errors and tracebacks."
" Also implies --continue-on-error.",
)
parser.add_argument(
"-c",
"--continue-on-error",
action="store_true",
dest="continue_on_error",
default=False,
help="Continue executing other scripts even though one has failed. "
"It will print a traceback unless --no-traceback or --silent are given "
"The exit code used when terminating will always be 1.",
)
parser.add_argument(
"--no-traceback",
action="store_true",
dest="no_traceback",
default=False,
help="Do not show tracebacks",
)
parser.add_argument(
"--script-args",
nargs="*",
type=str,
help="Space-separated argument list to be passed to the scripts. Note that "
"the same arguments will be passed to all named scripts.",
)
parser.add_argument(
"--dir-policy",
type=str,
choices=[
DirPolicyChoices.NONE,
DirPolicyChoices.EACH,
DirPolicyChoices.ROOT,
],
help="Policy of selecting scripts execution directory: "
"none - start all scripts in current directory "
"each - start all scripts in their directories "
"root - start all scripts in BASE_DIR directory ",
)
parser.add_argument(
"--chdir",
type=check_is_directory,
help="If dir-policy option is set to custom, than this option determines "
"script execution directory.",
)
@signalcommand
def handle(self, *args, **options):
NOTICE = self.style.SQL_TABLE
NOTICE2 = self.style.SQL_FIELD
ERROR = self.style.ERROR
ERROR2 = self.style.NOTICE
subdirs = []
scripts = options["script"]
if not options["noscripts"]:
subdirs.append(getattr(settings, "RUNSCRIPT_SCRIPT_DIR", "scripts"))
if options["infixtures"]:
subdirs.append("fixtures")
verbosity = options["verbosity"]
show_traceback = options["traceback"]
no_traceback = options["no_traceback"]
continue_on_error = options["continue_on_error"]
if no_traceback:
show_traceback = False
else:
show_traceback = True
silent = options["silent"]
if silent:
verbosity = 0
continue_on_error = True
email_notifications = options["email_notifications"]
if len(subdirs) < 1:
print(NOTICE("No subdirs to run left."))
return
if len(scripts) < 1:
print(ERROR("Script name required."))
return
def get_directory_from_chdir():
directory = options["chdir"] or getattr(settings, "RUNSCRIPT_CHDIR", None)
try:
check_is_directory(directory)
except ArgumentTypeError as e:
raise BadCustomDirectoryException(str(e))
return directory
def get_directory_basing_on_policy(script_module):
policy = options["dir_policy"] or getattr(
settings, "RUNSCRIPT_CHDIR_POLICY", DirPolicyChoices.NONE
)
if policy == DirPolicyChoices.ROOT:
return settings.BASE_DIR
elif policy == DirPolicyChoices.EACH:
return os.path.dirname(inspect.getfile(script_module))
else:
return self.current_directory
def set_directory(script_module):
if options["chdir"]:
directory = get_directory_from_chdir()
elif options["dir_policy"]:
directory = get_directory_basing_on_policy(script_module)
elif getattr(settings, "RUNSCRIPT_CHDIR", None):
directory = get_directory_from_chdir()
else:
directory = get_directory_basing_on_policy(script_module)
os.chdir(os.path.abspath(directory))
def run_script(mod, *script_args):
exit_code = None
try:
set_directory(mod)
exit_code = mod.run(*script_args)
if isinstance(exit_code, bool):
# convert boolean True to exit-code 0 and False to exit-code 1
exit_code = 1 if exit_code else 0
if isinstance(exit_code, int):
if exit_code != 0:
try:
raise CommandError(
"'%s' failed with exit code %s"
% (mod.__name__, exit_code),
returncode=exit_code,
)
except TypeError:
raise CommandError(
"'%s' failed with exit code %s"
% (mod.__name__, exit_code)
)
if email_notifications:
self.send_email_notification(notification_id=mod.__name__)
except Exception as e:
if isinstance(e, CommandError) and hasattr(e, "returncode"):
exit_code = e.returncode
self.last_exit_code = exit_code if isinstance(exit_code, int) else 1
if silent:
return
if verbosity > 0:
print(ERROR("Exception while running run() in '%s'" % mod.__name__))
if continue_on_error:
if show_traceback:
traceback.print_exc()
return
if email_notifications:
self.send_email_notification(
notification_id=mod.__name__, include_traceback=True
)
if no_traceback:
raise CommandError(repr(e))
raise
def my_import(parent_package, module_name):
full_module_path = "%s.%s" % (parent_package, module_name)
if verbosity > 1:
print(NOTICE("Check for %s" % full_module_path))
# Try importing the parent package first
try:
importlib.import_module(parent_package)
except ImportError as e:
if str(e).startswith("No module named"):
# No need to proceed if the parent package doesn't exist
return False
try:
t = importlib.import_module(full_module_path)
except ImportError as e:
# The parent package exists, but the module doesn't
try:
if importlib.util.find_spec(full_module_path) is None:
return False
except Exception:
module_file = (
os.path.join(settings.BASE_DIR, *full_module_path.split("."))
+ ".py"
)
if not os.path.isfile(module_file):
return False
if silent:
return False
if show_traceback:
traceback.print_exc()
if verbosity > 0:
print(
ERROR("Cannot import module '%s': %s." % (full_module_path, e))
)
return False
if hasattr(t, "run"):
if verbosity > 1:
print(NOTICE2("Found script '%s' ..." % full_module_path))
return t
else:
if verbosity > 1:
print(
ERROR2(
"Found script '%s' but no run() function found."
% full_module_path
)
)
def find_modules_for_script(script):
"""Find script module which contains 'run' attribute"""
modules = []
# first look in apps
for app in apps.get_app_configs():
for subdir in subdirs:
mod = my_import("%s.%s" % (app.name, subdir), script)
if mod:
modules.append(mod)
# try direct import
if script.find(".") != -1:
parent, mod_name = script.rsplit(".", 1)
mod = my_import(parent, mod_name)
if mod:
modules.append(mod)
else:
# try app.DIR.script import
for subdir in subdirs:
mod = my_import(subdir, script)
if mod:
modules.append(mod)
return modules
if options["script_args"]:
script_args = options["script_args"]
else:
script_args = []
# first pass to check if all scripts can be found
script_to_run = []
for script in scripts:
script_modules = find_modules_for_script(script)
if not script_modules:
self.last_exit_code = 1
if verbosity > 0 and not silent:
print(ERROR("No (valid) module for script '%s' found" % script))
continue
script_to_run.extend(script_modules)
if self.last_exit_code:
if verbosity < 2 and not silent:
print(
ERROR("Try running with a higher verbosity level like: -v2 or -v3")
)
if not continue_on_error:
script_to_run = []
for script_mod in script_to_run:
if verbosity > 1:
print(NOTICE2("Running script '%s' ..." % script_mod.__name__))
run_script(script_mod, *script_args)
if self.last_exit_code != 0:
if silent:
if hasattr(self, "running_tests"):
return
sys.exit(self.last_exit_code)
try:
raise CommandError(
"An error has occurred running scripts. See errors above.",
returncode=self.last_exit_code,
)
except TypeError:
# Django < 3.1 fallback
if self.last_exit_code == 1:
# if exit_code is 1 we can still raise CommandError without
# returncode argument
raise CommandError(
"An error has occurred running scripts. See errors above."
)
print(ERROR("An error has occurred running scripts. See errors above."))
if hasattr(self, "running_tests"):
return
sys.exit(self.last_exit_code)
| Command |
python | google__flatbuffers | grpc/examples/python/greeter/models/greeter_grpc_fb.py | {
"start": 180,
"end": 530
} | class ____(object):
'''Interface exported by the server.'''
def __init__(self, channel):
'''Constructor.
Args:
channel: A grpc.Channel.
'''
self.SayHello = channel.unary_unary(
method='/models.Greeter/SayHello')
self.SayManyHellos = channel.unary_stream(
method='/models.Greeter/SayManyHellos')
| GreeterStub |
python | explosion__spaCy | spacy/lang/lv/__init__.py | {
"start": 151,
"end": 247
} | class ____(Language):
lang = "lv"
Defaults = LatvianDefaults
__all__ = ["Latvian"]
| Latvian |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/asb.py | {
"start": 4007,
"end": 21691
} | class ____(BaseAzureServiceBusHook):
"""
Interact with the ServiceBusAdministrationClient.
This can create, update, list, and delete resources of a Service Bus
namespace. This hook uses the same Azure Service Bus client connection
inherited from the base class.
"""
def get_conn(self) -> ServiceBusAdministrationClient:
"""
Create a ServiceBusAdministrationClient instance.
This uses the connection string in connection details.
"""
conn = self.get_connection(self.conn_id)
connection_string: str = str(conn.schema)
if connection_string:
client = ServiceBusAdministrationClient.from_connection_string(connection_string)
else:
extras = conn.extra_dejson
credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name="credential")
fully_qualified_namespace = self._get_field(extras=extras, field_name="fully_qualified_namespace")
if not credential:
managed_identity_client_id = self._get_field(
extras=extras, field_name="managed_identity_client_id"
)
workload_identity_tenant_id = self._get_field(
extras=extras, field_name="workload_identity_tenant_id"
)
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
client = ServiceBusAdministrationClient(
fully_qualified_namespace=fully_qualified_namespace,
credential=credential, # type: ignore[arg-type]
)
self.log.info("Create and returns ServiceBusAdministrationClient")
return client
def create_queue(
self,
queue_name: str,
max_delivery_count: int = 10,
dead_lettering_on_message_expiration: bool = True,
enable_batched_operations: bool = True,
) -> QueueProperties:
"""
Create Queue by connecting to service Bus Admin client return the QueueProperties.
:param queue_name: The name of the queue or a QueueProperties with name.
:param max_delivery_count: The maximum delivery count. A message is automatically
dead lettered after this number of deliveries. Default value is 10..
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription has
dead letter support when a message expires.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
"""
if queue_name is None:
raise TypeError("Queue name cannot be None.")
with self.get_conn() as service_mgmt_conn:
queue = service_mgmt_conn.create_queue(
queue_name,
max_delivery_count=max_delivery_count,
dead_lettering_on_message_expiration=dead_lettering_on_message_expiration,
enable_batched_operations=enable_batched_operations,
)
return queue
def delete_queue(self, queue_name: str) -> None:
"""
Delete the queue by queue_name in service bus namespace.
:param queue_name: The name of the queue or a QueueProperties with name.
"""
if queue_name is None:
raise TypeError("Queue name cannot be None.")
with self.get_conn() as service_mgmt_conn:
service_mgmt_conn.delete_queue(queue_name)
def create_topic(
self,
topic_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
default_message_time_to_live: datetime.timedelta | str | None = None,
max_size_in_megabytes: int | None = None,
requires_duplicate_detection: bool | None = None,
duplicate_detection_history_time_window: datetime.timedelta | str | None = None,
enable_batched_operations: bool | None = None,
size_in_bytes: int | None = None,
filtering_messages_before_publishing: bool | None = None,
authorization_rules: list[AuthorizationRule] | None = None,
support_ordering: bool | None = None,
auto_delete_on_idle: datetime.timedelta | str | None = None,
enable_partitioning: bool | None = None,
enable_express: bool | None = None,
user_metadata: str | None = None,
max_message_size_in_kilobytes: int | None = None,
) -> str:
"""
Create a topic by connecting to service Bus Admin client.
:param topic_name: Name of the topic.
:param default_message_time_to_live: ISO 8601 default message time span to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
:param requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:param duplicate_detection_history_time_window: ISO 8601 time span structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:param size_in_bytes: The size of the topic, in bytes.
:param filtering_messages_before_publishing: Filter messages before publishing.
:param authorization_rules: List of Authorization rules for resource.
:param support_ordering: A value that indicates whether the topic supports ordering.
:param auto_delete_on_idle: ISO 8601 time span idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:param enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:param user_metadata: Metadata associated with the topic.
:param max_message_size_in_kilobytes: The maximum size in kilobytes of message payload that
can be accepted by the queue. This feature is only available when using a Premium namespace
and Service Bus API version "2021-05" or higher.
The minimum allowed value is 1024 while the maximum allowed value is 102400. Default value is 1024.
"""
if topic_name is None:
raise TypeError("Topic name cannot be None.")
with self.get_conn() as service_mgmt_conn:
try:
topic_properties = service_mgmt_conn.get_topic(topic_name)
except ResourceNotFoundError:
topic_properties = None
if topic_properties and topic_properties.name == topic_name:
self.log.info("Topic name already exists")
return topic_properties.name
topic = service_mgmt_conn.create_topic(
topic_name=topic_name,
default_message_time_to_live=default_message_time_to_live,
max_size_in_megabytes=max_size_in_megabytes,
requires_duplicate_detection=requires_duplicate_detection,
duplicate_detection_history_time_window=duplicate_detection_history_time_window,
enable_batched_operations=enable_batched_operations,
size_in_bytes=size_in_bytes,
filtering_messages_before_publishing=filtering_messages_before_publishing,
authorization_rules=authorization_rules,
support_ordering=support_ordering,
auto_delete_on_idle=auto_delete_on_idle,
enable_partitioning=enable_partitioning,
enable_express=enable_express,
user_metadata=user_metadata,
max_message_size_in_kilobytes=max_message_size_in_kilobytes,
)
self.log.info("Created Topic %s", topic.name)
return topic.name
def create_subscription(
self,
topic_name: str,
subscription_name: str,
lock_duration: datetime.timedelta | str | None = None,
requires_session: bool | None = None,
default_message_time_to_live: datetime.timedelta | str | None = None,
dead_lettering_on_message_expiration: bool | None = True,
dead_lettering_on_filter_evaluation_exceptions: bool | None = None,
max_delivery_count: int | None = 10,
enable_batched_operations: bool | None = True,
forward_to: str | None = None,
user_metadata: str | None = None,
forward_dead_lettered_messages_to: str | None = None,
auto_delete_on_idle: datetime.timedelta | str | None = None,
filter_rule: CorrelationRuleFilter | SqlRuleFilter | None = None,
filter_rule_name: str | None = None,
) -> SubscriptionProperties:
"""
Create a subscription with specified name on a topic and return the SubscriptionProperties for it.
An optional filter_rule can be provided to filter messages based on their properties. In particular,
the correlation ID filter can be used to pair up replies to requests.
:param topic_name: The topic that will own the to-be-created subscription.
:param subscription_name: Name of the subscription that need to be created
:param lock_duration: ISO 8601 time span duration of a peek-lock; that is, the amount of time that
the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the
default value is 1 minute. Input value of either type ~datetime.timedelta or string in ISO 8601
duration format like "PT300S" is accepted.
:param requires_session: A value that indicates whether the queue supports the concept of sessions.
:param default_message_time_to_live: ISO 8601 default message time span to live value. This is the
duration after which the message expires, starting from when the message is sent to
Service Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration
format like "PT300S" is accepted.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription has
dead letter support when a message expires.
:param dead_lettering_on_filter_evaluation_exceptions: A value that indicates whether this
subscription has dead letter support when a message expires.
:param max_delivery_count: The maximum delivery count. A message is automatically dead lettered
after this number of deliveries. Default value is 10.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
:param forward_to: The name of the recipient entity to which all the messages sent to the
subscription are forwarded to.
:param user_metadata: Metadata associated with the subscription. Maximum number of characters is 1024.
:param forward_dead_lettered_messages_to: The name of the recipient entity to which all the
messages sent to the subscription are forwarded to.
:param auto_delete_on_idle: ISO 8601 time Span idle interval after which the subscription is
automatically deleted. The minimum duration is 5 minutes. Input value of either
type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:param filter_rule: Optional correlation or SQL rule filter to apply on the messages.
:param filter_rule_name: Optional rule name to use applying the rule filter to the subscription
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
"""
if subscription_name is None:
raise TypeError("Subscription name cannot be None.")
if topic_name is None:
raise TypeError("Topic name cannot be None.")
with self.get_conn() as connection:
# create subscription with name
subscription = connection.create_subscription(
topic_name=topic_name,
subscription_name=subscription_name,
lock_duration=lock_duration,
requires_session=requires_session,
default_message_time_to_live=default_message_time_to_live,
dead_lettering_on_message_expiration=dead_lettering_on_message_expiration,
dead_lettering_on_filter_evaluation_exceptions=dead_lettering_on_filter_evaluation_exceptions,
max_delivery_count=max_delivery_count,
enable_batched_operations=enable_batched_operations,
forward_to=forward_to,
user_metadata=user_metadata,
forward_dead_lettered_messages_to=forward_dead_lettered_messages_to,
auto_delete_on_idle=auto_delete_on_idle,
)
if filter_rule:
# remove default rule (which accepts all messages)
try:
connection.delete_rule(topic_name, subscription_name, "$Default")
except ResourceNotFoundError:
# as long as it is gone :)
self.log.debug("Could not find default rule '$Default' to delete; ignoring error.")
# add a rule to filter with the filter rule passed in
rule_name = filter_rule_name if filter_rule_name else "rule" + str(uuid4())
connection.create_rule(topic_name, subscription_name, rule_name, filter=filter_rule)
self.log.debug(
"Created rule %s for subscription %s on topic %s",
rule_name,
subscription_name,
topic_name,
)
return subscription
def update_subscription(
self,
topic_name: str,
subscription_name: str,
max_delivery_count: int | None = None,
dead_lettering_on_message_expiration: bool | None = None,
enable_batched_operations: bool | None = None,
) -> None:
"""
Update an Azure ServiceBus Topic Subscription under a ServiceBus Namespace.
:param topic_name: The topic that will own the to-be-created subscription.
:param subscription_name: Name of the subscription that need to be created.
:param max_delivery_count: The maximum delivery count. A message is automatically dead lettered
after this number of deliveries. Default value is 10.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
"""
with self.get_conn() as service_mgmt_conn:
subscription_prop = service_mgmt_conn.get_subscription(topic_name, subscription_name)
if max_delivery_count:
subscription_prop.max_delivery_count = max_delivery_count
if dead_lettering_on_message_expiration is not None:
subscription_prop.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
if enable_batched_operations is not None:
subscription_prop.enable_batched_operations = enable_batched_operations
# update by updating the properties in the model
service_mgmt_conn.update_subscription(topic_name, subscription_prop)
updated_subscription = service_mgmt_conn.get_subscription(topic_name, subscription_name)
self.log.info("Subscription Updated successfully %s", updated_subscription.name)
def delete_subscription(self, subscription_name: str, topic_name: str) -> None:
"""
Delete a topic subscription entities under a ServiceBus Namespace.
:param subscription_name: The subscription name that will own the rule in topic
:param topic_name: The topic that will own the subscription rule.
"""
if subscription_name is None:
raise TypeError("Subscription name cannot be None.")
if topic_name is None:
raise TypeError("Topic name cannot be None.")
with self.get_conn() as service_mgmt_conn:
self.log.info("Deleting Subscription %s", subscription_name)
service_mgmt_conn.delete_subscription(topic_name, subscription_name)
| AdminClientHook |
python | huggingface__transformers | src/transformers/activations.py | {
"start": 7020,
"end": 7483
} | class ____(nn.Module):
"""
Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
https://huggingface.co/papers/2209.10655
Inspired by squared relu, but with bounded range and gradient for better stability
"""
def forward(self, input, mu=0.707107, sigma=0.282095):
input = (input - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(input))
| LaplaceActivation |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_emr.py | {
"start": 1476,
"end": 2844
} | class ____(BaseAwsLinksTestCase):
link_class = EmrClusterLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "us-west-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"job_flow_id": "j-TEST-FLOW-ID",
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/emr/home?region=us-west-1#/clusterDetails/j-TEST-FLOW-ID"
),
region_name="us-west-1",
aws_partition="aws",
job_flow_id="j-TEST-FLOW-ID",
)
@pytest.mark.parametrize(
("cluster_info", "expected_uri"),
[
pytest.param({"Cluster": {}}, None, id="no-log-uri"),
pytest.param({"Cluster": {"LogUri": "s3://myLogUri/"}}, "myLogUri/", id="has-log-uri"),
],
)
def test_get_log_uri(cluster_info, expected_uri):
emr_client = MagicMock()
emr_client.describe_cluster.return_value = cluster_info
assert get_log_uri(cluster=None, emr_client=emr_client, job_flow_id="test_job_flow_id") == expected_uri
| TestEmrClusterLink |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 56855,
"end": 59940
} | class ____:
def test_send_new_organization_requested_email(
self, pyramid_request, pyramid_config, monkeypatch
):
initiator_user = pretend.stub(
id="id",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
organization_name = "example"
subject_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-requested/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-requested/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/new-organization-requested/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=initiator_user.id)
)
),
)
pyramid_request.user = initiator_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = email.send_new_organization_requested_email(
pyramid_request,
initiator_user,
organization_name=organization_name,
)
assert result == {"organization_name": organization_name}
subject_renderer.assert_(organization_name=organization_name)
body_renderer.assert_(organization_name=organization_name)
html_renderer.assert_(organization_name=organization_name)
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{initiator_user.username} <{initiator_user.email}>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": initiator_user.id,
"additional": {
"from_": "noreply@example.com",
"to": initiator_user.email,
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
| TestSendNewOrganizationRequestedEmail |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 177284,
"end": 178129
} | class ____(TypedDict, total=False):
"""
:class:`altair.MultiLineString` ``TypedDict`` wrapper.
Parameters
----------
coordinates
type
Specifies the type of GeoJSON object.
bbox
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
"""
coordinates: Sequence[Sequence[Sequence[float]]]
type: Literal["MultiLineString"]
bbox: Sequence[float]
| MultiLineStringKwds |
python | huggingface__transformers | examples/pytorch/image-pretraining/run_mim.py | {
"start": 2071,
"end": 4331
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
specify them on the command line.
"""
dataset_name: Optional[str] = field(
default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
image_column_name: Optional[str] = field(
default=None,
metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
)
train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
train_val_split: Optional[float] = field(
default=0.15, metadata={"help": "Percent to split off of train for validation."}
)
mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
mask_ratio: float = field(
default=0.6,
metadata={"help": "Percentage of patches to mask."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
def __post_init__(self):
data_files = {}
if self.train_dir is not None:
data_files["train"] = self.train_dir
if self.validation_dir is not None:
data_files["val"] = self.validation_dir
self.data_files = data_files if data_files else None
@dataclass
| DataTrainingArguments |
python | doocs__leetcode | solution/3200-3299/3209.Number of Subarrays With AND Value of K/Solution.py | {
"start": 0,
"end": 329
} | class ____:
def countSubarrays(self, nums: List[int], k: int) -> int:
ans = 0
pre = Counter()
for x in nums:
cur = Counter()
for y, v in pre.items():
cur[x & y] += v
cur[x] += 1
ans += cur[k]
pre = cur
return ans
| Solution |
python | explosion__spaCy | spacy/schemas.py | {
"start": 19381,
"end": 19978
} | class ____(BaseModel):
training: ConfigSchemaTraining
nlp: ConfigSchemaNlp
pretraining: Union[ConfigSchemaPretrain, ConfigSchemaPretrainEmpty] = {} # type: ignore[assignment]
components: Dict[str, Dict[str, Any]]
corpora: Dict[str, Reader]
initialize: ConfigSchemaInit
class Config:
extra = "allow"
arbitrary_types_allowed = True
CONFIG_SCHEMAS = {
"nlp": ConfigSchemaNlp,
"training": ConfigSchemaTraining,
"pretraining": ConfigSchemaPretrain,
"initialize": ConfigSchemaInit,
}
# Recommendations for init config workflows
| ConfigSchema |
python | ipython__ipython | IPython/core/profileapp.py | {
"start": 9942,
"end": 10711
} | class ____(Application):
name = u'ipython profile'
description = profile_help
examples = _main_examples
subcommands = Dict(dict(
create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
list = (ProfileList, ProfileList.description.splitlines()[0]),
locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
))
def start(self):
if self.subapp is None:
print(
"No subcommand specified. Must specify one of: "
+ ", ".join(map(repr, self.subcommands))
+ ".\n"
)
self.print_description()
self.print_subcommands()
self.exit(1)
else:
return self.subapp.start()
| ProfileApp |
python | doocs__leetcode | lcof/面试题17. 打印从1到最大的n位数/Solution.py | {
"start": 0,
"end": 527
} | class ____:
def printNumbers(self, n: int) -> List[int]:
return list(range(1, 10**n))
def print(self, n: int) -> List[str]:
def dfs(i, j):
if i == j:
ans.append("".join(s))
return
k = 0 if i else 1
while k < 10:
s.append(str(k))
dfs(i + 1, j)
s.pop()
k += 1
ans = []
s = []
for i in range(1, n + 1):
dfs(0, i)
return ans
| Solution |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_smartsymbols.py | {
"start": 178,
"end": 4110
} | class ____(util.MdCase):
"""Test smart symbols works in various scenarios."""
extension = [
'toc',
'smarty',
'pymdownx.smartsymbols'
]
extension_configs = {}
def test_copyright(self):
"""Test copyright."""
self.check_markdown(
'Copyright (c)',
'<p>Copyright ©</p>'
)
def test_trademark(self):
"""Test trademark."""
self.check_markdown(
'Trademark(tm)',
'<p>Trademark™</p>'
)
def test_registered(self):
"""Test registered."""
self.check_markdown(
'Registered(r)',
'<p>Registered®</p>'
)
def test_plus_minus(self):
"""Test plus/minus."""
self.check_markdown(
'230 +/- 10% V',
'<p>230 ± 10% V</p>'
)
def test_neq(self):
"""Test not equal."""
self.check_markdown(
'A =/= B',
'<p>A ≠ B</p>'
)
def test_right(self):
"""Test right arrow."""
self.check_markdown(
'right arrow -->',
'<p>right arrow →</p>'
)
def test_left(self):
"""Test left arrow."""
self.check_markdown(
'left arrow <--',
'<p>left arrow ←</p>'
)
def test_double_arrow(self):
"""Test double arrow."""
self.check_markdown(
'double arrow <-->',
'<p>double arrow ↔</p>'
)
def test_ordinals(self):
"""Test ordinals."""
self.check_markdown(
"""
Good: 1st 2nd 3rd 11th 12th 13th 15th 32nd 103rd
Bad: 1th 2th 3th 2rd 1nd 22th 33th 41nd 53nd
""",
"""
<p>Good: 1<sup>st</sup> 2<sup>nd</sup> 3<sup>rd</sup> 11<sup>th</sup> 12<sup>th</sup> 13<sup>th</sup> 15<sup>th</sup> 32<sup>nd</sup> 103<sup>rd</sup></p>
<p>Bad: 1th 2th 3th 2rd 1nd 22th 33th 41nd 53nd</p>
""", # noqa: E501
True
)
def test_fractions(self):
"""Test fractions."""
self.check_markdown(
"""
Fraction 1/2
Fraction 1/4
Fraction 3/4
Fraction 1/3
Fraction 2/3
Fraction 1/5
Fraction 2/5
Fraction 3/5
Fraction 4/5
Fraction 1/6
Fraction 5/6
Fraction 1/8
Fraction 3/8
Fraction 5/8
Fraction 7/8
""",
"""
<p>Fraction ½
Fraction ¼
Fraction ¾
Fraction ⅓
Fraction ⅔
Fraction ⅕
Fraction ⅖
Fraction ⅗
Fraction ⅘
Fraction ⅙
Fraction ⅚
Fraction ⅛
Fraction ⅜
Fraction ⅝
Fraction ⅞</p>
""",
True
)
def test_toc_tokens(self):
"""Ensure smart symbols end up correctly in table of content tokens."""
md = markdown.Markdown(extensions=['toc', 'pymdownx.smartsymbols'])
md.convert('# *Foo* =/= `bar`')
self.assertEqual(
md.toc_tokens,
[
{
'children': [],
'data-toc-label': '',
'html': '<em>Foo</em> ≠ <code>bar</code>',
'id': 'foo-bar',
'level': 1,
'name': 'Foo ≠ bar'
}
] if PYMD_3_6 else [
{
'level': 1,
'id': 'foo-bar',
'name': 'Foo ≠ bar',
'children': []
}
]
)
| TestSmartSymbols |
python | realpython__materials | python-class/aircrafts.py | {
"start": 0,
"end": 326
} | class ____:
def __init__(self, thrust, lift, max_speed):
self.thrust = thrust
self.lift = lift
self.max_speed = max_speed
def show_technical_specs(self):
print(f"Thrust: {self.thrust} kW")
print(f"Lift: {self.lift} kg")
print(f"Max speed: {self.max_speed} km/h")
| Aircraft |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 10763,
"end": 10881
} | class ____(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
| RdbData |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_mixed_precision.py | {
"start": 50038,
"end": 53574
} | class ____(FSDPTest):
@property
def world_size(self):
return 2
@skip_if_lt_x_gpu(2)
def test_train_ema_eval_flow(self):
"""
Tests a train -> EMA update -> eval flow with mixed precision enabled.
"""
self.run_subtests(
{
"sharding_strategy": [
# We mainly want to test `SHARD_GRAD_OP` since it surfaced
# the original bug of not using the right EMA parameters
# for eval, but we also test the others for completeness
ShardingStrategy.SHARD_GRAD_OP,
ShardingStrategy.FULL_SHARD,
ShardingStrategy.NO_SHARD,
]
},
self._test_train_ema_eval_flow,
)
def _test_train_ema_eval_flow(self, sharding_strategy: ShardingStrategy):
class TransformerWithEMA(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.module = nn.Transformer(device=device)
self.ema_module = AveragedModel(
nn.Transformer(device=device),
multi_avg_fn=torch.optim.swa_utils.get_ema_multi_avg_fn(),
use_buffers=True,
)
def forward(self, *args, **kwargs):
# Use main copy for training and EMA copy for eval
if self.training:
return self.module(*args, **kwargs)
return self.ema_module(*args, **kwargs)
device = torch.device("cuda")
model = TransformerWithEMA(device=device)
policy = ModuleWrapPolicy(
{nn.Transformer, nn.TransformerEncoderLayer, nn.TransformerDecoderLayer}
)
mixed_precision = MixedPrecision(param_dtype=torch.float16)
fsdp_model = FSDP(
model,
auto_wrap_policy=policy,
mixed_precision=mixed_precision,
sharding_strategy=sharding_strategy,
)
optim = torch.optim.Adam(fsdp_model.module.parameters(), lr=1e-2)
if self.rank == 0:
print(fsdp_model)
torch.manual_seed(1 + self.rank)
eval_src = torch.randn((8, 1, 512), device=device)
eval_tgt = torch.randn((16, 1, 512), device=device)
eval_out_sums: list[torch.Tensor] = []
# An iteration consists of training forward/backward/optimizer,
# updating the EMA copy with the main copy, and eval forward
for _ in range(3):
fsdp_model.train()
train_src = torch.randn((8, 4, 512), device=device)
train_tgt = torch.randn((16, 4, 512), device=device)
train_out = fsdp_model(train_src, train_tgt)
train_out.sum().backward()
optim.step()
optim.zero_grad()
with FSDP.summon_full_params(fsdp_model):
fsdp_model.ema_module.update_parameters(fsdp_model.module)
fsdp_model.eval()
with torch.no_grad():
eval_out = fsdp_model(eval_src, eval_tgt)
eval_out_sums.append(eval_out.sum())
# Check that the eval outputs differ from iteration to iteration as a
# proxy for eval using the correct EMA parameters
for i in range(len(eval_out_sums) - 1):
self.assertNotEqual(eval_out_sums[i], eval_out_sums[i + 1])
self.assertNotEqual(eval_out_sums[0], eval_out_sums[-1])
if __name__ == "__main__":
run_tests()
| TestFSDPTrainEval |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/mix_up_test.py | {
"start": 212,
"end": 5430
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.MixUp,
init_kwargs={
"alpha": 0.2,
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
# StatelessRandomGammaV3 is not supported on XLA_GPU_JIT
run_training_check=not testing.tensorflow_uses_gpu(),
)
def test_mix_up_inference(self):
seed = 3481
layer = layers.MixUp(alpha=0.2)
np.random.seed(seed)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
def test_mix_up_basic_functionality(self):
image = np.random.random((64, 64, 3))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 64, 64, 3))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_mix_up_basic_functionality_channel_first(self):
image = np.random.random((3, 64, 64))
mix_up_layer = layers.MixUp(alpha=1)
transformation = {"mix_weight": 1, "permutation_order": [0]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)[0]
self.assertAllClose(output, image)
image = np.random.random((4, 3, 64, 64))
mix_up_layer = layers.MixUp(alpha=0.2)
transformation = {"mix_weight": 0.2, "permutation_order": [1, 0, 2, 3]}
output = mix_up_layer.transform_images(
image, transformation=transformation
)
self.assertNotAllClose(output, image)
self.assertAllClose(output.shape, image.shape)
def test_tf_data_compatibility(self):
layer = layers.MixUp()
input_data = np.random.random((2, 8, 8, 3))
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
def test_mix_up_bounding_boxes(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
image_shape = (10, 8, 3)
else:
image_shape = (3, 10, 8)
input_image = np.random.random(image_shape)
bounding_boxes = {
"boxes": np.array(
[
[2, 1, 4, 3],
[6, 4, 8, 6],
]
),
"labels": np.array([1, 2]),
}
input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
expected_boxes = [[2, 1, 4, 3, 6, 4, 8, 6], [6, 4, 8, 6, 2, 1, 4, 3]]
random_flip_layer = layers.MixUp(
data_format=data_format,
seed=42,
bounding_box_format="xyxy",
)
transformation = {
"mix_weight": convert_to_tensor([0.5, 0.5]),
"permutation_order": convert_to_tensor([1, 0]),
}
output = random_flip_layer.transform_bounding_boxes(
input_data["bounding_boxes"],
transformation=transformation,
training=True,
)
self.assertAllClose(output["boxes"], expected_boxes)
def test_mix_up_tf_data_bounding_boxes(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
image_shape = (1, 10, 8, 3)
else:
image_shape = (1, 3, 10, 8)
input_image = np.random.random(image_shape)
bounding_boxes = {
"boxes": np.array(
[
[
[2, 1, 4, 3],
[6, 4, 8, 6],
]
]
),
"labels": np.array([[1, 2]]),
}
input_data = {"images": input_image, "bounding_boxes": bounding_boxes}
expected_boxes = [[2, 1, 4, 3, 6, 4, 8, 6], [6, 4, 8, 6, 2, 1, 4, 3]]
ds = tf_data.Dataset.from_tensor_slices(input_data)
layer = layers.MixUp(
data_format=data_format,
seed=42,
bounding_box_format="xyxy",
)
transformation = {
"mix_weight": convert_to_tensor([0.5, 0.5]),
"permutation_order": convert_to_tensor([1, 0]),
}
ds = ds.map(
lambda x: layer.transform_bounding_boxes(
x["bounding_boxes"],
transformation=transformation,
training=True,
)
)
output = next(iter(ds))
expected_boxes = np.array(expected_boxes)
self.assertAllClose(output["boxes"], expected_boxes)
| MixUpTest |
python | Textualize__textual | src/textual/widgets/_select.py | {
"start": 934,
"end": 1067
} | class ____(Exception):
"""Raised when setting a [`Select`][textual.widgets.Select] to an unknown option."""
| InvalidSelectValueError |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 28995,
"end": 30425
} | class ____(MemoryLeakMixin, TestCase):
"""Test list remove. """
def test_list_remove_empty(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
l.remove(0)
with self.assertRaises(ValueError):
foo()
def test_list_remove_singleton(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
l.remove(0)
return len(l)
self.assertEqual(foo(), 0)
def test_list_remove_singleton_value_error(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
l.append(1)
l.remove(0)
with self.assertRaises(ValueError):
foo()
def test_list_remove_multiple(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in range(10, 20):
l.append(j)
l.remove(13)
l.remove(19)
return len(l)
self.assertEqual(foo(), 8)
def test_list_remove_multiple_value_error(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
for j in range(10, 20):
l.append(j)
l.remove(23)
with self.assertRaises(ValueError):
foo()
| TestRemove |
python | Netflix__metaflow | test/core/tests/card_multiple.py | {
"start": 72,
"end": 4110
} | class ____(MetaflowTest):
"""
Test that checks if the multiple card decorators work with @step code.
- This test adds multiple `test_pathspec_card` cards to a @step
- Each card will contain taskpathspec
- CLI Check:
- List cards and cli will assert multiple cards are present per taskspec using `CliCheck.list_cards`
- Assert the information about the card using the hash and check if taskspec is present in the data
- Metadata Check
- List cards and cli will assert multiple cards are present per taskspec using `MetadataCheck.list_cards`
- Assert the information about the card using the hash and check if taskspec is present in the data
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('card(type="test_pathspec_card")')
@tag('card(type="test_pathspec_card")')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
self.task = current.pathspec
@tag('card(type="test_pathspec_card")')
@tag('card(type="test_pathspec_card")')
@steps(0, ["foreach-nested-inner"])
def step_foreach_inner(self):
from metaflow import current
self.task = current.pathspec
@tag('card(type="test_pathspec_card")')
@tag('card(type="test_pathspec_card")')
@steps(1, ["join"])
def step_join(self):
from metaflow import current
self.task = current.pathspec
@tag('card(type="test_pathspec_card")')
@tag('card(type="test_pathspec_card")')
@steps(1, ["all"])
def step_all(self):
from metaflow import current
self.task = current.pathspec
def check_results(self, flow, checker):
run = checker.get_run()
if run is None:
# This means CliCheck is in context.
for step in flow:
cli_check_dict = checker.artifact_dict(step.name, "task")
for task_pathspec in cli_check_dict:
full_pathspec = "/".join([flow.name, task_pathspec])
task_id = task_pathspec.split("/")[-1]
cards_info = checker.list_cards(step.name, task_id)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
for card in cards_info["cards"]:
checker.assert_card(
step.name,
task_id,
"test_pathspec_card",
"%s" % full_pathspec,
card_hash=card["hash"],
exact_match=False,
)
else:
# This means MetadataCheck is in context.
for step in flow:
meta_check_dict = checker.artifact_dict(step.name, "task")
for task_id in meta_check_dict:
full_pathspec = meta_check_dict[task_id]["task"]
cards_info = checker.list_cards(step.name, task_id)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
for card in cards_info["cards"]:
checker.assert_card(
step.name,
task_id,
"test_pathspec_card",
"%s" % full_pathspec,
card_hash=card["hash"],
exact_match=False,
)
| MultipleCardDecoratorTest |
python | explosion__spaCy | spacy/lang/et/__init__.py | {
"start": 152,
"end": 251
} | class ____(Language):
lang = "et"
Defaults = EstonianDefaults
__all__ = ["Estonian"]
| Estonian |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/batch_test.py | {
"start": 20777,
"end": 23454
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
dataset_range=[10],
batch_size=[2, 3],
symbolic_checkpoint=[True, False])))
def testBatch(
self,
verify_fn: Callable[..., None],
dataset_range: int,
batch_size: int,
symbolic_checkpoint: bool):
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(dataset, seed=42)
dataset = dataset.unbatch()
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(
self,
_build_dataset,
num_outputs=(dataset_range // batch_size) * batch_size,
assert_items_equal=True)
# Creating multiple iterators with the same seed is only supported in v2 API.
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
dataset_range=[10],
batch_size=[2, 3],
reshuffle_each_iteration=[True, False],
symbolic_checkpoint=[True, False])))
def testReshuffleEachIteration(
self,
verify_fn: Callable[..., None],
dataset_range: int,
batch_size: int,
reshuffle_each_iteration: bool,
symbolic_checkpoint: bool):
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
dataset = dataset.unbatch()
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(
self,
_build_dataset,
num_outputs=(dataset_range // batch_size) * batch_size,
assert_items_equal=reshuffle_each_iteration)
if __name__ == "__main__":
test.main()
| BatchGlobalShuffleCheckpointTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 883,
"end": 980
} | class ____[T = T1]: ...
# This should generate an error because Ts1 is not a valid default.
| ClassT7 |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 442,
"end": 721
} | class ____(BaseModel, alias_generator=None, frozen=True, extra=Extra.forbid):
x: int
y: str
def method(self) -> None:
pass
kwargs_model = KwargsModel(x=1, y='y', z='z')
kwargs_model = KwargsModel(x=1)
kwargs_model.y = 'a'
KwargsModel.from_orm({})
| KwargsModel |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/adls2/resources.py | {
"start": 798,
"end": 888
} | class ____(Config):
credential_type: Literal["sas"] = "sas"
token: str
| ADLS2SASToken |
python | pypa__warehouse | warehouse/utils/security_policy.py | {
"start": 712,
"end": 4026
} | class ____:
"""
A wrapper for multiple Pyramid 2.0-style "security policies", which replace
Pyramid 1.0's separate AuthN and AuthZ APIs.
Security policies are checked in the order provided during initialization,
with the following semantics:
* `identity`: Selected from the first policy to return non-`None`
* `authenticated_userid`: Selected from the first policy to return an identity
* `forget`: Combined from all policies
* `remember`: Combined from all policies
* `permits`: Uses the the policy that returned the identity.
These semantics mostly mirror those of `pyramid-multiauth`.
"""
def __init__(self, policies):
self._policies = policies
self._identity_cache = RequestLocalCache(self._get_identity_with_policy)
def _get_identity_with_policy(self, request):
# This will be cached per request, which means that we'll have a stable
# result for both the identity AND the policy that produced it. Further
# uses can then make sure to use the same policy throughout, at least
# where it makes sense to.
for policy in self._policies:
if ident := policy.identity(request):
return ident, policy
return None, None
def reset(self, request):
self._identity_cache.clear(request)
def identity(self, request):
identity, _policy = self._identity_cache.get_or_create(request)
return identity
def authenticated_userid(self, request):
if ident := self.identity(request):
# TODO: Note, this logic breaks the contract of a SecurityPolicy, the
# authenticated_userid is intended to be used to fetch the unique
# identifier that represents the current identity. We're leaving
# it here for now, because there are a number of views directly
# using this to detect user vs not, which we'll need to move to a
# more correct pattern before fixing this.
if isinstance(ident, UserContext):
return str(ident.user.id)
return None
def forget(self, request, **kw):
headers = []
for policy in self._policies:
headers.extend(policy.forget(request, **kw))
return headers
def remember(self, request, userid, **kw):
headers = []
for policy in self._policies:
headers.extend(policy.remember(request, userid, **kw))
return headers
def permits(self, request, context, permission):
identity, policy = self._identity_cache.get_or_create(request)
# Sanity check that somehow our cached identity + policy didn't end up
# different than what the request.identity is. This shouldn't be possible
# but we'll assert it because if we let it pass silently it may mean that
# some kind of confused-deputy attack is possible.
assert request.identity == identity, "request has a different identity"
# Dispatch to the underlying policy for the given identity, if there was one
# for this request.
if policy is not None:
return policy.permits(request, context, permission)
else:
return Denied("unknown identity")
| MultiSecurityPolicy |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_dependencies.py | {
"start": 6321,
"end": 9708
} | class ____:
@pytest.mark.usefixtures("make_primary_connected_component")
def test_should_response_200(self, test_client, expected_primary_component_response):
with assert_queries_count(5):
response = test_client.get("/dependencies")
assert response.status_code == 200
assert response.json() == expected_primary_component_response
@pytest.mark.usefixtures("make_primary_connected_component")
def test_delete_dag_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/dependencies")
assert response.status_code == 401
@pytest.mark.usefixtures("make_primary_connected_component")
def test_delete_dag_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/dependencies")
assert response.status_code == 403
@pytest.mark.parametrize(
("node_id", "expected_response_fixture"),
[
# Primary Component
("dag:downstream", "expected_primary_component_response"),
("sensor:other_dag:downstream:external_task_sensor", "expected_primary_component_response"),
("dag:external_trigger_dag_id", "expected_primary_component_response"),
(
"trigger:external_trigger_dag_id:downstream:trigger_dag_run_operator",
"expected_primary_component_response",
),
("dag:upstream", "expected_primary_component_response"),
# Secondary Component
("dag:downstream_secondary", "expected_secondary_component_response"),
("dag:upstream_secondary", "expected_secondary_component_response"),
],
)
@pytest.mark.usefixtures("make_primary_connected_component", "make_secondary_connected_component")
def test_with_node_id_filter(self, test_client, node_id, expected_response_fixture, request):
expected_response = request.getfixturevalue(expected_response_fixture)
with assert_queries_count(5):
response = test_client.get("/dependencies", params={"node_id": node_id})
assert response.status_code == 200
assert response.json() == expected_response
def test_with_node_id_filter_with_asset(
self,
test_client,
asset1_id,
asset2_id,
expected_primary_component_response,
expected_secondary_component_response,
):
for asset_id, expected_response in (
(asset1_id, expected_primary_component_response),
(asset2_id, expected_secondary_component_response),
):
with assert_queries_count(5):
response = test_client.get("/dependencies", params={"node_id": f"asset:{asset_id}"})
assert response.status_code == 200
assert response.json() == expected_response
@pytest.mark.usefixtures("make_primary_connected_component", "make_secondary_connected_component")
def test_with_node_id_filter_not_found(self, test_client):
response = test_client.get("/dependencies", params={"node_id": "missing_node_id"})
assert response.status_code == 404
assert response.json() == {
"detail": "Unique connected component not found, got [] for connected components of node missing_node_id, expected only 1 connected component.",
}
| TestGetDependencies |
python | PrefectHQ__prefect | src/integrations/prefect-azure/tests/test_blob_storage.py | {
"start": 3324,
"end": 10705
} | class ____:
async def test_download_folder_to_path(
self, mock_blob_storage_credentials, tmp_path
):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
await container.download_folder_to_path("folder", tmp_path / "folder")
assert (tmp_path / "folder").exists()
assert (tmp_path / "folder" / "prefect.txt").exists()
with open(tmp_path / "folder" / "prefect.txt", "rb") as f:
assert f.read() == b"prefect_works"
async def test_download_object_to_file_object(
self, mock_blob_storage_credentials, tmp_path
):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
file_path = tmp_path / "file.txt"
with open(file_path, "wb") as f:
await container.download_object_to_file_object(
from_path="prefect.txt", to_file_object=f
)
assert file_path.exists()
with open(file_path, "rb") as f:
assert f.read() == b"prefect_works"
async def test_download_object_to_path(
self, mock_blob_storage_credentials, tmp_path
):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
from_path = "prefect.txt"
to_path = tmp_path / "file.txt"
await container.download_object_to_path(from_path, to_path)
assert to_path.exists()
with open(to_path, "rb") as f:
assert f.read() == b"prefect_works"
async def test_upload_from_file_object(
self, mock_blob_storage_credentials, tmp_path
):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
file_content = b"prefect_works_again"
file_object = BytesIO(file_content)
to_path = "object"
uploaded_path = await container.upload_from_file_object(
from_file_object=file_object,
to_path=to_path,
)
assert uploaded_path == to_path
await container.download_object_to_path("object", tmp_path / "file.txt")
with open(tmp_path / "file.txt", "rb") as f:
assert f.read() == b"prefect_works_again"
async def test_upload_from_path(self, mock_blob_storage_credentials, tmp_path):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
from_path = tmp_path / "file.txt"
to_path = "object-from-path"
with open(from_path, "wb") as f:
f.write(b"prefect_works_yet_again")
uploaded_path = await container.upload_from_path(
from_path=from_path,
to_path=to_path,
)
assert uploaded_path == to_path
await container.download_object_to_path(to_path, tmp_path / "file.txt")
with open(tmp_path / "file.txt", "rb") as f:
assert f.read() == b"prefect_works_yet_again"
async def test_upload_from_folder(
self, mock_blob_storage_credentials, tmp_path: Path
):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
from_folder = tmp_path / "local_folder"
from_folder.mkdir(parents=True, exist_ok=True)
to_folder = "folder"
file1_path = from_folder / "file1.txt"
file2_path = from_folder / "file2.txt"
file1_path.write_bytes(b"file1_content")
file2_path.write_bytes(b"file2_content")
await container.upload_from_folder(
from_folder=from_folder,
to_folder=to_folder,
)
await container.download_object_to_path(
"folder/file1.txt", tmp_path / "read_file1.txt"
)
with open(tmp_path / "read_file1.txt", "rb") as f:
assert f.read() == b"file1_content"
await container.download_object_to_path(
"folder/file2.txt", tmp_path / "read_file2.txt"
)
with open(tmp_path / "read_file2.txt", "rb") as f:
assert f.read() == b"file2_content"
async def test_get_directory(slef, mock_blob_storage_credentials, tmp_path):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
from_path = "folder"
local_path = str(tmp_path / "local_directory")
await container.get_directory(from_path, local_path)
assert (tmp_path / "local_directory").exists()
assert (tmp_path / "local_directory" / "prefect.txt").exists()
with open(tmp_path / "local_directory" / "prefect.txt", "rb") as f:
assert f.read() == b"prefect_works"
async def test_put_directory(self, mock_blob_storage_credentials, tmp_path):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
local_path = tmp_path / "local_directory"
to_path = "destination_directory"
local_path.mkdir()
file1_path = local_path / "file1.txt"
file2_path = local_path / "file2.txt"
file1_path.write_bytes(b"file1_content")
file2_path.write_bytes(b"file2_content")
await container.put_directory(local_path=str(local_path), to_path=to_path)
await container.download_object_to_path(
"destination_directory/file1.txt", tmp_path / "read_file1.txt"
)
with open(tmp_path / "read_file1.txt", "rb") as f:
assert f.read() == b"file1_content"
await container.download_object_to_path(
"destination_directory/file2.txt", tmp_path / "read_file2.txt"
)
with open(tmp_path / "read_file2.txt", "rb") as f:
assert f.read() == b"file2_content"
async def test_read_path(self, mock_blob_storage_credentials):
container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
path = "file.txt"
file_content = b"prefect_works"
await container.upload_from_file_object(BytesIO(file_content), path)
result = await container.read_path(path)
assert result == file_content
async def test_blob_storage_write_path(self, mock_blob_storage_credentials):
container = AzureBlobStorageContainer(
container_name="prefect",
credentials=mock_blob_storage_credentials,
)
await container.write_path("prefect-write-path.txt", b"write_path_works")
result = await container.read_path("prefect-write-path.txt")
assert result == b"write_path_works"
async def test_list_blobs(self, mock_blob_storage_credentials):
blob_container = AzureBlobStorageContainer(
container_name="container",
credentials=mock_blob_storage_credentials,
)
blob_result = await blob_container.list_blobs()
assert sorted(blob_result) == ["folder/prefect.txt"]
| TestAzureBlobStorageContainer |
python | sqlalchemy__sqlalchemy | test/orm/test_relationships.py | {
"start": 168579,
"end": 171111
} | class ____(_fixtures.FixtureTest):
run_inserts = None
run_deletes = None
def _test_attribute(self, obj, attrname, newvalue):
sess = fixture_session()
sess.add(obj)
oldvalue = getattr(obj, attrname)
sess.commit()
# expired
assert attrname not in obj.__dict__
setattr(obj, attrname, newvalue)
eq_(
attributes.get_history(obj, attrname), ([newvalue], (), [oldvalue])
)
def test_column_property_flag(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(
User,
users,
properties={
"name": column_property(users.c.name, active_history=True)
},
)
u1 = User(name="jack")
self._test_attribute(u1, "name", "ed")
def test_relationship_property_flag(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"user": relationship(User, active_history=True)},
)
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1", user=u1)
self._test_attribute(a1, "user", u2)
def test_composite_property_flag(self):
Order, orders = self.classes.Order, self.tables.orders
class MyComposite:
def __init__(self, description, isopen):
self.description = description
self.isopen = isopen
def __composite_values__(self):
return [self.description, self.isopen]
def __eq__(self, other):
return (
isinstance(other, MyComposite)
and other.description == self.description
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"composite": composite(
MyComposite,
orders.c.description,
orders.c.isopen,
active_history=True,
)
},
)
o1 = Order(composite=MyComposite("foo", 1))
self._test_attribute(o1, "composite", MyComposite("bar", 1))
| ActiveHistoryFlagTest |
python | fastai__fastai | fastai/vision/core.py | {
"start": 8329,
"end": 8730
} | class ____(TensorPoint):
"Basic type for a tensor of bounding boxes in an image"
@classmethod
def create(cls, x, img_size=None)->None: return cls(tensor(x).view(-1, 4).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
x = self.view(-1,4)
for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
return ctx
# %% ../../nbs/07_vision.core.ipynb 73
| TensorBBox |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 221672,
"end": 224948
} | class ____(Request):
"""
Delete existing artifacts (search by key/mode)
:param task: Task ID
:type task: str
:param artifacts: Artifacts to delete
:type artifacts: Sequence[ArtifactId]
:param force: If set to True then both new and running task artifacts can be
deleted. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "delete_artifacts"
_version = "2.23"
_schema = {
"definitions": {
"artifact_id": {
"properties": {
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
},
"required": ["key"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
},
"properties": {
"artifacts": {
"description": "Artifacts to delete",
"items": {"$ref": "#/definitions/artifact_id"},
"type": "array",
},
"force": {
"description": (
"If set to True then both new and running task artifacts can be deleted. Otherwise only the new"
" task ones. Default is False"
),
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "artifacts"],
"type": "object",
}
def __init__(self, task, artifacts, force=None, **kwargs):
super(DeleteArtifactsRequest, self).__init__(**kwargs)
self.task = task
self.artifacts = artifacts
self.force = force
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("artifacts")
def artifacts(self):
return self._property_artifacts
@artifacts.setter
def artifacts(self, value):
if value is None:
self._property_artifacts = None
return
self.assert_isinstance(value, "artifacts", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [
ArtifactId.from_dict(v) if isinstance(v, dict) else v for v in value
]
else:
self.assert_isinstance(value, "artifacts", ArtifactId, is_array=True)
self._property_artifacts = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| DeleteArtifactsRequest |
python | apache__airflow | providers/celery/tests/unit/celery/log_handlers/test_log_handlers.py | {
"start": 1647,
"end": 3747
} | class ____:
def clean_up(self):
with create_session() as session:
session.query(DagRun).delete()
session.query(TaskInstance).delete()
def setup_method(self):
logging.root.disabled = False
self.clean_up()
# We use file task handler by default.
def teardown_method(self):
self.clean_up()
def test__read_for_celery_executor_fallbacks_to_worker(self, create_task_instance):
"""Test for executors which do not have `get_task_log` method, it fallbacks to reading
log from worker"""
executor_name = "CeleryExecutor"
ti = create_task_instance(
dag_id="dag_for_testing_celery_executor_log_read",
task_id="task_for_testing_celery_executor_log_read",
run_type=DagRunType.SCHEDULED,
logical_date=DEFAULT_DATE,
)
ti.state = TaskInstanceState.RUNNING
ti.try_number = 1
with conf_vars({("core", "executor"): executor_name}):
reload(executor_loader)
fth = FileTaskHandler("")
fth._read_from_logs_server = mock.Mock()
# compat with 2.x and 3.x
if AIRFLOW_V_3_0_PLUS:
fth._read_from_logs_server.return_value = (
["this message"],
[convert_list_to_stream(["this", "log", "content"])],
)
else:
fth._read_from_logs_server.return_value = ["this message"], ["this\nlog\ncontent"]
logs, metadata = fth._read(ti=ti, try_number=1)
fth._read_from_logs_server.assert_called_once()
if AIRFLOW_V_3_0_PLUS:
logs = list(logs)
assert logs[0].sources == ["this message"]
assert [x.event for x in logs[-3:]] == ["this", "log", "content"]
assert metadata == {"end_of_log": False, "log_pos": 3}
else:
assert "*** this message\n" in logs
assert logs.endswith("this\nlog\ncontent")
assert metadata == {"end_of_log": False, "log_pos": 16}
| TestFileTaskLogHandler |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-cohere/llama_index/embeddings/cohere/base.py | {
"start": 3521,
"end": 15581
} | class ____(MultiModalEmbedding):
"""CohereEmbedding uses the Cohere API to generate embeddings for text."""
# Instance variables initialized via Pydantic's mechanism
api_key: str = Field(description="The Cohere API key.")
base_url: Optional[str] = Field(
default=None, description="The endpoint to use. Defaults to the Cohere API."
)
truncate: str = Field(description="Truncation type - START/ END/ NONE")
input_type: Optional[str] = Field(
default=None,
description="Model Input type. If not provided, search_document and search_query are used when needed.",
)
embedding_type: str = Field(
description="Embedding type. If not provided float embedding_type is used when needed."
)
_client: cohere.Client = PrivateAttr()
_async_client: cohere.AsyncClient = PrivateAttr()
_timeout: Optional[float] = PrivateAttr()
_httpx_client: Optional[httpx.Client] = PrivateAttr()
_httpx_async_client: Optional[httpx.AsyncClient] = PrivateAttr()
def __init__(
self,
# deprecated
cohere_api_key: Optional[str] = None,
api_key: Optional[str] = None,
model_name: str = "embed-english-v3.0",
truncate: str = "END",
input_type: Optional[str] = None,
embedding_type: str = "float",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
base_url: Optional[str] = None,
timeout: Optional[float] = None,
httpx_client: Optional[httpx.Client] = None,
httpx_async_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
):
"""
A class representation for generating embeddings using the Cohere API.
Args:
truncate (str): A string indicating the truncation strategy to be applied to input text. Possible values
are 'START', 'END', or 'NONE'.
input_type (Optional[str]): An optional string that specifies the type of input provided to the model.
This is model-dependent and could be one of the following: 'search_query',
'search_document', 'classification', or 'clustering'.
model_name (str): The name of the model to be used for generating embeddings. The class ensures that
this model is supported and that the input type provided is compatible with the model.
embed_batch_size (int): The batch size for embedding generation. Maximum allowed value is 96 (MAX_EMBED_BATCH_SIZE)
due to Cohere API limitations. Defaults to DEFAULT_EMBED_BATCH_SIZE.
"""
# Validate model_name and input_type
if model_name not in VALID_MODEL_INPUT_TYPES:
raise ValueError(f"{model_name} is not a valid model name")
if input_type not in VALID_MODEL_INPUT_TYPES[model_name]:
raise ValueError(
f"{input_type} is not a valid input type for the provided model."
)
if embedding_type not in VALID_MODEL_EMBEDDING_TYPES[model_name]:
raise ValueError(
f"{embedding_type} is not a embedding type for the provided model."
)
if truncate not in VALID_TRUNCATE_OPTIONS:
raise ValueError(f"truncate must be one of {VALID_TRUNCATE_OPTIONS}")
# Validate embed_batch_size
if embed_batch_size > MAX_EMBED_BATCH_SIZE:
raise ValueError(
f"embed_batch_size {embed_batch_size} exceeds the maximum allowed value of {MAX_EMBED_BATCH_SIZE} for Cohere API"
)
super().__init__(
api_key=api_key or cohere_api_key,
model_name=model_name,
input_type=input_type,
embedding_type=embedding_type,
truncate=truncate,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
num_workers=num_workers,
**kwargs,
)
self.base_url = base_url
self._client = None
self._async_client = None
self._timeout = timeout
self._httpx_client = httpx_client
self._httpx_async_client = httpx_async_client
def _get_client(self) -> cohere.ClientV2:
if self._client is None:
self._client = cohere.ClientV2(
api_key=self.api_key,
client_name="llama_index",
base_url=self.base_url,
timeout=self._timeout,
httpx_client=self._httpx_client,
)
return self._client
def _get_async_client(self) -> cohere.AsyncClientV2:
if self._async_client is None:
self._async_client = cohere.AsyncClientV2(
api_key=self.api_key,
client_name="llama_index",
base_url=self.base_url,
timeout=self._timeout,
httpx_client=self._httpx_async_client,
)
return self._async_client
@classmethod
def class_name(cls) -> str:
return "CohereEmbedding"
def _image_to_base64_data_url(self, image_input: Union[str, Path, BytesIO]) -> str:
"""Convert an image to a base64 Data URL."""
if isinstance(image_input, (str, Path)):
# If it's a string or Path, assume it's a file path
image_path = Path(image_input)
file_extension = image_path.suffix.lower().replace(".", "")
with open(image_path, "rb") as f:
image_data = f.read()
elif isinstance(image_input, BytesIO):
# If it's a BytesIO, use it directly
image = Image.open(image_input)
file_extension = image.format.lower()
image_input.seek(0) # Reset the BytesIO stream to the beginning
image_data = image_input.read()
else:
raise ValueError("Unsupported input type. Must be a file path or BytesIO.")
if self._validate_image_format(file_extension):
enc_img = base64.b64encode(image_data).decode("utf-8")
return f"data:image/{file_extension};base64,{enc_img}"
else:
raise ValueError(f"Unsupported image format: {file_extension}")
def _validate_image_format(self, file_type: str) -> bool:
"""Validate image format."""
return file_type.lower() in SUPPORTED_IMAGE_FORMATS
def _embed(
self,
texts: Optional[List[str]] = None,
input_type: str = "search_document",
) -> List[List[float]]:
"""Embed sentences using Cohere."""
client = self._get_client()
if self.model_name not in (V3_MODELS + V4_MODELS):
input_type = None
else:
input_type = self.input_type or input_type
result = client.embed(
texts=texts,
input_type=input_type,
embedding_types=[self.embedding_type],
model=self.model_name,
truncate=self.truncate,
).embeddings
return getattr(result, self.embedding_type, None)
async def _aembed(
self,
texts: Optional[List[str]] = None,
input_type: str = "search_document",
) -> List[List[float]]:
"""Embed sentences using Cohere."""
async_client = self._get_async_client()
if self.model_name not in (V3_MODELS + V4_MODELS):
input_type = None
else:
input_type = self.input_type or input_type
result = (
await async_client.embed(
texts=texts,
input_type=input_type,
embedding_types=[self.embedding_type],
model=self.model_name,
truncate=self.truncate,
)
).embeddings
return getattr(result, self.embedding_type, None)
def _embed_image(
self, image_paths: List[ImageType], input_type: str
) -> List[List[float]]:
"""Embed images using Cohere."""
if self.model_name not in (V3_MODELS + V4_MODELS):
raise ValueError(
f"{self.model_name} is not a valid multi-modal embedding model. Supported models are {V3_MODELS + V4_MODELS}"
)
client = self._get_client()
processed_images = [
self._image_to_base64_data_url(image_path) for image_path in image_paths
]
inputs = [
{"content": [{"type": "image_url", "image_url": {"url": processed_image}}]}
for processed_image in processed_images
]
embeddings = client.embed(
inputs=inputs,
input_type=input_type,
embedding_types=[self.embedding_type],
model=self.model_name,
truncate=self.truncate,
).embeddings
return getattr(embeddings, self.embedding_type, None)
async def _aembed_image(
self,
image_paths: List[ImageType],
input_type: str,
) -> List[List[float]]:
"""Embed images using Cohere."""
if self.model_name not in (V3_MODELS + V4_MODELS):
raise ValueError(
f"{self.model_name} is not a valid multi-modal embedding model. Supported models are {V3_MODELS + V4_MODELS}"
)
async_client = self._get_async_client()
processed_images = [
self._image_to_base64_data_url(image_path) for image_path in image_paths
]
inputs = [
{"content": [{"type": "image_url", "image_url": {"url": processed_image}}]}
for processed_image in processed_images
]
embeddings = (
await async_client.embed(
inputs=inputs,
input_type=input_type,
embedding_types=[self.embedding_type],
model=self.model_name,
truncate=self.truncate,
)
).embeddings
return getattr(embeddings, self.embedding_type, None)
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding. For query embeddings, input_type='search_query'."""
return self._embed([query], input_type="search_query")[0]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding async. For query embeddings, input_type='search_query'."""
return (await self._aembed([query], input_type="search_query"))[0]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._embed([text], input_type="search_document")[0]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Get text embedding async."""
return (await self._aembed([text], input_type="search_document"))[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._embed(texts, input_type="search_document")
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return await self._aembed(texts, input_type="search_document")
def _get_image_embedding(self, img_file_path: ImageType) -> Embedding:
"""Get image embedding."""
return self._embed_image([img_file_path], "image")[0]
async def _aget_image_embedding(self, img_file_path: ImageType) -> Embedding:
"""Get image embedding async."""
return (await self._aembed_image([img_file_path], "image"))[0]
def _get_image_embeddings(
self, img_file_paths: List[ImageType]
) -> List[List[float]]:
"""Get image embeddings."""
return self._embed_image(img_file_paths, "image")
async def _aget_image_embeddings(
self, img_file_paths: List[ImageType]
) -> List[List[float]]:
"""Get image embeddings async."""
return await self._aembed_image(img_file_paths, "image")
| CohereEmbedding |
python | Netflix__metaflow | test/core/tests/card_default_editable_customize.py | {
"start": 72,
"end": 4307
} | class ____(MetaflowTest):
"""
`current.card.append` should be accessible to the card with `customize=True`.
- Even if there are other editable cards without `id` and with `id`
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('card(type="test_editable_card",customize=True)')
@tag('card(type="test_editable_card",id="abc")')
@tag('card(type="taskspec_card")')
@tag('card(type="test_editable_card_2")')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
current.card.append(TestStringComponent(str(self.random_number)))
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
card_type = "test_editable_card"
if run is None:
# This means CliCheck is in context.
for step in flow:
if step.name != "start":
continue
cli_check_dict = checker.artifact_dict(step.name, "random_number")
for task_pathspec in cli_check_dict:
task_id = task_pathspec.split("/")[-1]
cards_info = checker.list_cards(step.name, task_id, card_type)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
# Find the card without the id
default_editable_cards = [
c for c in cards_info["cards"] if c["id"] is None
]
# There should only be one card of type "test_editable_card" with no id.
# That is the default editable card because it has `customize=True`
assert_equals(len(default_editable_cards) == 1, True)
card = default_editable_cards[0]
number = cli_check_dict[task_pathspec]["random_number"]
checker.assert_card(
step.name,
task_id,
card_type,
"%d" % number,
card_hash=card["hash"],
exact_match=True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
if step.name != "start":
continue
meta_check_dict = checker.artifact_dict(step.name, "random_number")
for task_id in meta_check_dict:
cards_info = checker.list_cards(step.name, task_id, card_type)
assert_equals(
cards_info is not None
and "cards" in cards_info
and len(cards_info["cards"]) == 2,
True,
)
default_editable_cards = [
c for c in cards_info["cards"] if c["id"] is None
]
# There should only be one card of type "test_editable_card" with no id.
# That is the default editable card since it has `customize=True`
assert_equals(len(default_editable_cards) == 1, True)
card = default_editable_cards[0]
random_number = meta_check_dict[task_id]["random_number"]
checker.assert_card(
step.name,
task_id,
card_type,
"%d" % random_number,
card_hash=card["hash"],
exact_match=True,
)
| DefaultEditableCardWithCustomizeTest |
python | pytest-dev__pytest | testing/test_collection.py | {
"start": 829,
"end": 4509
} | class ____:
def test_collect_versus_item(self) -> None:
from pytest import Collector
from pytest import Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_check_equality(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
assert hash(fn1) == hash(fn2)
fn3 = pytester.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert isinstance(fn, pytest.Function)
assert fn != 3 # type: ignore[comparison-overlap]
assert fn != modcol
assert fn != [1, 2, 3] # type: ignore[comparison-overlap]
assert [1, 2, 3] != fn # type: ignore[comparison-overlap]
assert modcol != fn
assert pytester.collect_by_name(modcol, "doesnotexist") is None
def test_getparent_and_accessors(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
class TestClass:
def test_foo(self):
pass
"""
)
cls = pytester.collect_by_name(modcol, "TestClass")
assert isinstance(cls, pytest.Class)
fn = pytester.collect_by_name(cls, "test_foo")
assert isinstance(fn, pytest.Function)
assert fn.getparent(pytest.Module) is modcol
assert modcol.module is not None
assert modcol.cls is None
assert modcol.instance is None
assert fn.getparent(pytest.Class) is cls
assert cls.module is not None
assert cls.cls is not None
assert cls.instance is None
assert fn.getparent(pytest.Function) is fn
assert fn.module is not None
assert fn.cls is not None
assert fn.instance is not None
assert fn.function is not None
def test_getcustomfile_roundtrip(self, pytester: Pytester) -> None:
hello = pytester.makefile(".xxx", hello="world")
pytester.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
def collect(self):
return []
def pytest_collect_file(file_path, parent):
if file_path.suffix == ".xxx":
return CustomFile.from_parent(path=file_path, parent=parent)
"""
)
node = pytester.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, pytester: Pytester) -> None:
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
pytester.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
| TestCollector |
python | google__jax | jax_plugins/rocm/plugin_setup.py | {
"start": 1466,
"end": 2519
} | class ____(Distribution):
"""This class makes 'bdist_wheel' include an ABI tag on the wheel."""
def has_ext_modules(self):
return True
setup(
name=project_name,
version=__version__,
cmdclass=_cmdclass,
description=f"JAX Plugin for AMD GPUs (ROCm:{rocm_detected_version})",
long_description="",
long_description_content_type="text/markdown",
author="Ruturaj4",
author_email="Ruturaj.Vaidya@amd.com",
packages=[package_name],
python_requires=">=3.11",
install_requires=[f"jax-rocm{rocm_version}-pjrt=={__version__}"],
url="https://github.com/jax-ml/jax",
license="Apache-2.0",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
],
package_data={
package_name: [
"*",
],
},
zip_safe=False,
distclass=BinaryDistribution,
)
| BinaryDistribution |
python | django-compressor__django-compressor | compressor/tests/test_parsers.py | {
"start": 3703,
"end": 5026
} | class ____(ParserTestCase, CompressorTestCase):
parser_cls = "compressor.parser.BeautifulSoupParser"
# just like in the Html5LibParserTests, provide special tests because
# in bs4 attributes are held in dictionaries
def test_css_split(self):
split = self.css_node.split_contents()
out0 = (
SOURCE_FILE,
os.path.join(settings.COMPRESS_ROOT, "css", "one.css"),
"css/one.css",
None,
None,
)
self.assertEqual(out0, split[0][:3] + (split[0][3].tag, split[0][3].attrib))
out1 = (
SOURCE_HUNK,
"p { border:5px solid green;}",
None,
'<style type="text/css">p { border:5px solid green;}</style>',
)
self.assertEqual(
out1, split[1][:3] + (self.css_node.parser.elem_str(split[1][3]),)
)
out2 = (
SOURCE_FILE,
os.path.join(settings.COMPRESS_ROOT, "css", "two.css"),
"css/two.css",
None,
None,
)
self.assertEqual(out2, split[2][:3] + (split[2][3].tag, split[2][3].attrib))
@override_settings(COMPRESS_ENABLED=False)
def test_css_return_if_off(self):
self.assertEqual(len(self.css), len(self.css_node.output()))
| BeautifulSoupParserTests |
python | apache__airflow | providers/zendesk/src/airflow/providers/zendesk/hooks/zendesk.py | {
"start": 1130,
"end": 4944
} | class ____(BaseHook):
"""
Interact with Zendesk. This hook uses the Zendesk conn_id.
:param zendesk_conn_id: The Airflow connection used for Zendesk credentials.
"""
conn_name_attr = "zendesk_conn_id"
default_conn_name = "zendesk_default"
conn_type = "zendesk"
hook_name = "Zendesk"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": ["schema", "port", "extra"],
"relabeling": {"host": "Zendesk domain", "login": "Zendesk email"},
}
def __init__(self, zendesk_conn_id: str = default_conn_name) -> None:
super().__init__()
self.zendesk_conn_id = zendesk_conn_id
self.base_api: BaseApi | None = None
zenpy_client, url = self._init_conn()
self.zenpy_client = zenpy_client
self.__url = url
self.get = self.zenpy_client.users._get
def _init_conn(self) -> tuple[Zenpy, str]:
"""
Create the Zenpy Client for our Zendesk connection.
:return: zenpy.Zenpy client and the url for the API.
"""
conn = self.get_connection(self.zendesk_conn_id)
domain = ""
url = ""
subdomain: str | None = None
if conn.host:
url = "https://" + conn.host
domain = conn.host
if conn.host.count(".") >= 2:
dot_splitted_string = conn.host.rsplit(".", 2)
subdomain = dot_splitted_string[0]
domain = ".".join(dot_splitted_string[1:])
return Zenpy(domain=domain, subdomain=subdomain, email=conn.login, password=conn.password), url
def get_conn(self) -> Zenpy:
"""
Get the underlying Zenpy client.
:return: zenpy.Zenpy client.
"""
return self.zenpy_client
def get_ticket(self, ticket_id: int) -> Ticket:
"""
Retrieve ticket.
:return: Ticket object retrieved.
"""
return self.zenpy_client.tickets(id=ticket_id)
def search_tickets(self, **kwargs) -> SearchResultGenerator:
"""
Search tickets.
:param kwargs: (optional) Search fields given to the zenpy search method.
:return: SearchResultGenerator of Ticket objects.
"""
return self.zenpy_client.search(type="ticket", **kwargs)
def create_tickets(self, tickets: Ticket | list[Ticket], **kwargs) -> TicketAudit | JobStatus:
"""
Create tickets.
:param tickets: Ticket or List of Ticket to create.
:param kwargs: (optional) Additional fields given to the zenpy create method.
:return: A TicketAudit object containing information about the Ticket created.
When sending bulk request, returns a JobStatus object.
"""
return self.zenpy_client.tickets.create(tickets, **kwargs)
def update_tickets(self, tickets: Ticket | list[Ticket], **kwargs) -> TicketAudit | JobStatus:
"""
Update tickets.
:param tickets: Updated Ticket or List of Ticket object to update.
:param kwargs: (optional) Additional fields given to the zenpy update method.
:return: A TicketAudit object containing information about the Ticket updated.
When sending bulk request, returns a JobStatus object.
"""
return self.zenpy_client.tickets.update(tickets, **kwargs)
def delete_tickets(self, tickets: Ticket | list[Ticket], **kwargs) -> None:
"""
Delete tickets, returns nothing on success and raises APIException on failure.
:param tickets: Ticket or List of Ticket to delete.
:param kwargs: (optional) Additional fields given to the zenpy delete method.
:return:
"""
return self.zenpy_client.tickets.delete(tickets, **kwargs)
| ZendeskHook |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol17.py | {
"start": 962,
"end": 1107
} | class ____(Protocol[_T1]):
def m1(self, p0: _T1) -> None: ...
# This should generate an error because _T1_co should be contravariant.
| Protocol4 |
python | ethereum__web3.py | tests/core/utilities/test_http_session_manager.py | {
"start": 592,
"end": 10532
} | class ____:
def __init__(self, text="", status_code=200):
assert isinstance(text, str)
assert isinstance(status_code, int)
self.status_code = status_code
self.ok = 200 <= status_code < 400
self.text = text
self.reason = None
self.content = "content"
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def iter_content(self):
return [b"iter content"]
@staticmethod
def json():
return json.dumps({"data": "content"})
def raise_for_status(self):
pass
TEST_URI = URI("http://mynode.local:8545")
UNIQUE_URIS = [
"https://www.test1.com",
"https://www.test2.com",
"https://www.test3.com",
"https://www.test4.com",
"https://www.test5.com",
]
def check_adapters_mounted(session: Session):
assert isinstance(session, Session)
assert len(session.adapters) == 2
def _simulate_call(http_session_manager, uri):
_session = http_session_manager.cache_and_return_session(uri, request_timeout=0.01)
# simulate a call taking 0.01s to return a response
time.sleep(0.01)
return _session
@pytest.fixture
def http_session_manager():
return HTTPSessionManager()
def test_session_manager_json_make_get_request(mocker, http_session_manager):
mocker.patch("requests.Session.get", return_value=MockedResponse())
# Submit a first request to create a session with default parameters
assert len(http_session_manager.session_cache) == 0
response = http_session_manager.json_make_get_request(TEST_URI)
assert response == json.dumps({"data": "content"})
assert len(http_session_manager.session_cache) == 1
cache_key = generate_cache_key(f"{threading.get_ident()}:{TEST_URI}")
session = http_session_manager.session_cache.get_cache_entry(cache_key)
session.get.assert_called_once_with(TEST_URI, timeout=30)
# Ensure the adapter was created with default values
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == DEFAULT_POOLSIZE
assert adapter._pool_maxsize == DEFAULT_POOLSIZE
def test_session_manager_json_make_post_request(mocker, http_session_manager):
mocker.patch("requests.Session.post", return_value=MockedResponse())
# Submit a first request to create a session with default parameters
assert len(http_session_manager.session_cache) == 0
response = http_session_manager.json_make_post_request(
TEST_URI, json={"data": "request"}
)
assert response == json.dumps({"data": "content"})
assert len(http_session_manager.session_cache) == 1
cache_key = generate_cache_key(f"{threading.get_ident()}:{TEST_URI}")
session = http_session_manager.session_cache.get_cache_entry(cache_key)
session.post.assert_called_once_with(TEST_URI, json={"data": "request"}, timeout=30)
# Ensure the adapter was created with default values
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == DEFAULT_POOLSIZE
assert adapter._pool_maxsize == DEFAULT_POOLSIZE
def test_session_manager_make_post_request_no_args(mocker, http_session_manager):
mocker.patch("requests.Session.post", return_value=MockedResponse())
# Submit a first request to create a session with default parameters
assert len(http_session_manager.session_cache) == 0
response = http_session_manager.make_post_request(TEST_URI, data=b"request")
assert response == "content"
assert len(http_session_manager.session_cache) == 1
cache_key = generate_cache_key(f"{threading.get_ident()}:{TEST_URI}")
session = http_session_manager.session_cache.get_cache_entry(cache_key)
session.post.assert_called_once_with(
TEST_URI, data=b"request", timeout=30, stream=False
)
# Ensure the adapter was created with default values
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == DEFAULT_POOLSIZE
assert adapter._pool_maxsize == DEFAULT_POOLSIZE
def test_session_manager_make_post_request_streaming(mocker, http_session_manager):
mocker.patch("requests.Session.post", return_value=MockedResponse())
# Submit a first request to create a session
assert len(http_session_manager.session_cache) == 0
response = http_session_manager.make_post_request(
TEST_URI, data=b"request", stream=True
)
assert response == b"iter content"
assert len(http_session_manager.session_cache) == 1
cache_key = generate_cache_key(f"{threading.get_ident()}:{TEST_URI}")
session = http_session_manager.session_cache.get_cache_entry(cache_key)
session.post.assert_called_once_with(
TEST_URI, data=b"request", timeout=30, stream=True
)
# Ensure the adapter was created with passed in values
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == DEFAULT_POOLSIZE
assert adapter._pool_maxsize == DEFAULT_POOLSIZE
def test_session_manager_make_post_request_times_out_while_streaming(
mocker, http_session_manager
):
mocker.patch("requests.Session.post", return_value=MockedResponse())
# Submit a first request to create a session
assert len(http_session_manager.session_cache) == 0
with pytest.raises(TimeExhausted):
http_session_manager.make_post_request(
TEST_URI, data=b"request", stream=True, timeout=0.000001
)
assert len(http_session_manager.session_cache) == 1
cache_key = generate_cache_key(f"{threading.get_ident()}:{TEST_URI}")
session = http_session_manager.session_cache.get_cache_entry(cache_key)
session.post.assert_called_once_with(
TEST_URI, data=b"request", timeout=0.000001, stream=True
)
# Ensure the adapter was created with default values
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == DEFAULT_POOLSIZE
assert adapter._pool_maxsize == DEFAULT_POOLSIZE
def test_session_manager_precached_session(mocker, http_session_manager):
mocker.patch("requests.Session.post", return_value=MockedResponse())
# Update the cache with a handcrafted session
adapter = adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
session = Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
http_session_manager.cache_and_return_session(TEST_URI, session)
# Submit a second request with different arguments
assert len(http_session_manager.session_cache) == 1
response = http_session_manager.make_post_request(
TEST_URI, data=b"request", timeout=60
)
assert response == "content"
assert len(http_session_manager.session_cache) == 1
# Ensure the timeout was passed to the request
session = http_session_manager.cache_and_return_session(TEST_URI)
session.post.assert_called_once_with(
TEST_URI, data=b"request", timeout=60, stream=False
)
# Ensure the adapter parameters match those we specified
check_adapters_mounted(session)
adapter = session.get_adapter(TEST_URI)
assert isinstance(adapter, HTTPAdapter)
assert adapter._pool_connections == 100
assert adapter._pool_maxsize == 100
def test_simple_cache_cache_session():
cache = SimpleCache(2)
_, evicted_items = cache.cache("1", "Hello1")
assert cache.get_cache_entry("1") == "Hello1"
assert evicted_items is None
_, evicted_items = cache.cache("2", "Hello2")
assert cache.get_cache_entry("2") == "Hello2"
assert evicted_items is None
# Changing what is stored at a given cache key should not cause the
# anything to be evicted
_, evicted_items = cache.cache("1", "HelloChanged")
assert cache.get_cache_entry("1") == "HelloChanged"
assert evicted_items is None
_, evicted_items = cache.cache("3", "Hello3")
assert "2" in cache
assert "3" in cache
assert "1" not in cache
assert "1" in evicted_items
# Cache size is `3`. We should have "2" and "3" in the cache and "1" should have
# been evicted.
assert cache.get_cache_entry("1") is None
def test_session_manager_cache_does_not_close_session_before_a_call_when_multithreading(
http_session_manager,
):
# set cache size to 1 + set future session close thread time to 0.01s
http_session_manager.session_cache = SimpleCache(1)
with ThreadPoolExecutor(max_workers=len(UNIQUE_URIS)) as exc:
all_sessions = [
exc.submit(_simulate_call, http_session_manager, uri) for uri in UNIQUE_URIS
]
# assert last session remains in cache, all others evicted
cache_data = http_session_manager.session_cache._data
assert len(cache_data) == 1
_key, cached_session = cache_data.popitem()
assert cached_session == all_sessions[-1].result() # result of the `Future`
# -- teardown -- #
# close the cached session before exiting test
cached_session.close()
def test_session_manager_unique_cache_keys_created_per_thread_with_same_uri(
http_session_manager,
):
# somewhat inspired by issue #2680
with ThreadPoolExecutor(max_workers=2) as exc:
test_sessions = [
exc.submit(_simulate_call, http_session_manager, TEST_URI) for _ in range(2)
]
# assert unique keys are generated per thread for the same uri
assert len(http_session_manager.session_cache._data) == 2
# -- teardown -- #
# appropriately close the test sessions
[session.result().close() for session in test_sessions]
# -- async -- #
| MockedResponse |
python | pypa__packaging | src/packaging/version.py | {
"start": 12622,
"end": 17543
} | class ____(Version):
__slots__ = ()
def __init__(self, version: str | Version) -> None:
if isinstance(version, Version):
self._epoch = version._epoch
self._release = version._release
self._dev = version._dev
self._pre = version._pre
self._post = version._post
self._local = version._local
self._key_cache = version._key_cache
return
super().__init__(version) # pragma: no cover
@property
def release(self) -> tuple[int, ...]:
"""
Release segment without any trailing zeros.
>>> _TrimmedRelease('1.0.0').release
(1,)
>>> _TrimmedRelease('0.0').release
(0,)
"""
# Unlike _strip_trailing_zeros, this leaves one 0.
rel = super().release
i = len(rel)
while i > 1 and rel[i - 1] == 0:
i -= 1
return rel[:i]
def _parse_letter_version(
letter: str | None, number: str | bytes | SupportsInt | None
) -> tuple[str, int] | None:
if letter:
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
return letter, int(number or 0)
if number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
return "post", int(number)
return None
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str | None) -> LocalType | None:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
def _strip_trailing_zeros(release: tuple[int, ...]) -> tuple[int, ...]:
# We want to strip trailing zeros from a tuple of values. This starts
# from the end and returns as soon as it finds a non-zero value. When
# reading a lot of versions, this is a fairly hot function, so not using
# enumerate/reversed, which is slightly slower.
for i in range(len(release) - 1, -1, -1):
if release[i] != 0:
return release[: i + 1]
return ()
def _cmpkey(
epoch: int,
release: tuple[int, ...],
pre: tuple[str, int] | None,
post: tuple[str, int] | None,
dev: tuple[str, int] | None,
local: LocalType | None,
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. We will use this for our sorting key.
_release = _strip_trailing_zeros(release)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: CmpPrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: CmpPrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: CmpPrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: CmpLocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
| _TrimmedRelease |
python | wandb__wandb | wandb/integration/keras/keras.py | {
"start": 7846,
"end": 8442
} | class ____(_custom_optimizer_parent_class):
def __init__(self):
super().__init__(name="CustomOptimizer")
self._resource_apply_dense = tf.function(self._resource_apply_dense)
self._resource_apply_sparse = tf.function(self._resource_apply_sparse)
def _resource_apply_dense(self, grad, var):
var.assign(grad)
# this needs to be implemented to prevent a NotImplementedError when
# using Lookup layers.
def _resource_apply_sparse(self, grad, var, indices):
pass
def get_config(self):
return super().get_config()
| _CustomOptimizer |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 3817,
"end": 4073
} | class ____(TupleNumpyBatchCollateFn):
"""Collate function that returns id and value as a list of tensors."""
def __call__(self, batch: Dict[str, np.ndarray]) -> List[torch.Tensor]:
return list(super().__call__(batch))
| ListNumpyBatchCollateFn |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/root_adds_virtual/package.py | {
"start": 216,
"end": 454
} | class ____(Package):
homepage = "http://www.example.com"
url = "http://www.example.com/root-adds-virtual-1.0.tar.gz"
version("1.0", sha256="abcdef0123456789abcdef0123456789")
depends_on("middle-adds-virtual")
| RootAddsVirtual |
python | walkccc__LeetCode | solutions/1279. Traffic Light Controlled Intersection/1279.py | {
"start": 0,
"end": 626
} | class ____:
def __init__(self):
self.canPassRoadId = 1 # 1 := road A, 2 := road B
def carArrived(
self,
# ID of the car
carId: int,
# ID of the road the car travels on. Can be 1 (road A) or 2 (road B).
roadId: int,
# direction of the car
direction: int,
# Use turnGreen() to turn light to green on current road.
turnGreen: Callable[[], None],
# Use crossCar() to make car cross the intersection.
crossCar: Callable[[], None]
) -> None:
if roadId != self.canPassRoadId:
self.canPassRoadId = roadId
turnGreen()
crossCar()
| TrafficLight |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 5645,
"end": 5945
} | class ____(TypedDict):
type: Literal["progress"]
format: NotRequired[str | NumberFormat | None]
min_value: NotRequired[int | float | None]
max_value: NotRequired[int | float | None]
step: NotRequired[int | float | None]
color: NotRequired[ChartColor | None]
| ProgressColumnConfig |
python | pytorch__pytorch | torch/_higher_order_ops/triton_kernel_wrap.py | {
"start": 6867,
"end": 6931
} | class ____:
idx: int
@dataclasses.dataclass(frozen=True)
| Param |
python | pypa__warehouse | warehouse/manage/views/__init__.py | {
"start": 23508,
"end": 28137
} | class ____:
def __init__(self, request):
self.request = request
self.user_service = request.find_service(IUserService, context=None)
@view_config(
request_method="GET",
route_name="manage.account.webauthn-provision",
renderer="warehouse:templates/manage/account/webauthn-provision.html",
)
def webauthn_provision(self):
if not self.request.user.has_burned_recovery_codes:
return HTTPSeeOther(
self.request.route_path("manage.account.recovery-codes.burn")
)
return {}
@view_config(
request_method="GET",
route_name="manage.account.webauthn-provision.options",
renderer="json",
)
def webauthn_provision_options(self):
return self.user_service.get_webauthn_credential_options(
self.request.user.id,
challenge=self.request.session.get_webauthn_challenge(),
rp_name=self.request.registry.settings["site.name"],
rp_id=self.request.domain,
)
@view_config(
request_method="POST",
request_param=ProvisionWebAuthnForm.__params__,
route_name="manage.account.webauthn-provision.validate",
renderer="json",
)
def validate_webauthn_provision(self):
form = ProvisionWebAuthnForm(
self.request.POST,
user_service=self.user_service,
user_id=self.request.user.id,
challenge=self.request.session.get_webauthn_challenge(),
rp_id=self.request.domain,
origin=self.request.host_url,
)
self.request.session.clear_webauthn_challenge()
if form.validate():
self.user_service.add_webauthn(
self.request.user.id,
label=form.label.data,
credential_id=bytes_to_base64url(
form.validated_credential.credential_id
),
public_key=bytes_to_base64url(
form.validated_credential.credential_public_key
),
sign_count=form.validated_credential.sign_count,
)
self.request.user.record_event(
tag=EventTag.Account.TwoFactorMethodAdded,
request=self.request,
additional={"method": "webauthn", "label": form.label.data},
)
self.request.session.flash(
"Security device successfully set up", queue="success"
)
send_two_factor_added_email(
self.request, self.request.user, method="webauthn"
)
return {"success": "Security device successfully set up"}
errors = [
str(error) for error_list in form.errors.values() for error in error_list
]
return {"fail": {"errors": errors}}
@view_config(
request_method="POST",
request_param=DeleteWebAuthnForm.__params__,
route_name="manage.account.webauthn-provision.delete",
)
def delete_webauthn(self):
if len(self.request.user.webauthn) == 0:
self.request.session.flash(
"There is no security device to delete", queue="error"
)
return HTTPSeeOther(self.request.route_path("manage.account"))
if self.request.user.has_single_2fa:
self.request.session.flash("Cannot remove last 2FA method", queue="error")
return HTTPSeeOther(self.request.route_path("manage.account"))
form = DeleteWebAuthnForm(
self.request.POST,
username=self.request.user.username,
user_service=self.user_service,
user_id=self.request.user.id,
)
if form.validate():
self.request.user.webauthn.remove(form.webauthn)
self.request.user.record_event(
tag=EventTag.Account.TwoFactorMethodRemoved,
request=self.request,
additional={"method": "webauthn", "label": form.label.data},
)
self.request.session.flash("Security device removed", queue="success")
send_two_factor_removed_email(
self.request, self.request.user, method="webauthn"
)
else:
self.request.session.flash("Invalid credentials", queue="error")
return HTTPSeeOther(self.request.route_path("manage.account"))
@view_defaults(
uses_session=True,
require_csrf=True,
require_methods=False,
permission=Permissions.Account2FA,
http_cache=0,
has_translations=True,
)
| ProvisionWebAuthnViews |
python | huggingface__transformers | examples/pytorch/audio-classification/run_audio_classification.py | {
"start": 4509,
"end": 15951
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default="facebook/wav2vec2-base",
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
feature_extractor_name: Optional[str] = field(
default=None, metadata={"help": "Name or path of preprocessor config."}
)
freeze_feature_encoder: bool = field(
default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
)
attention_mask: bool = field(
default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
ignore_mismatched_sizes: bool = field(
default=False,
metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Initialize our dataset and prepare it for the audio classification task.
raw_datasets = DatasetDict()
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path,
return_attention_mask=model_args.attention_mask,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
model_input_name = feature_extractor.model_input_names[0]
def train_transforms(batch):
"""Apply train_transforms across a batch."""
subsampled_wavs = []
for audio in batch[data_args.audio_column_name]:
wav = random_subsample(
audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
)
subsampled_wavs.append(wav)
inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
output_batch = {model_input_name: inputs.get(model_input_name)}
output_batch["labels"] = list(batch[data_args.label_column_name])
return output_batch
def val_transforms(batch):
"""Apply val_transforms across a batch."""
wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
output_batch = {model_input_name: inputs.get(model_input_name)}
output_batch["labels"] = list(batch[data_args.label_column_name])
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
labels = raw_datasets["train"].features[data_args.label_column_name].names
label2id, id2label = {}, {}
for i, label in enumerate(labels):
label2id[label] = str(i)
id2label[str(i)] = label
# Load the accuracy metric from the datasets package
metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(eval_pred):
"""Computes accuracy on a batch of predictions"""
predictions = np.argmax(eval_pred.predictions, axis=1)
return metric.compute(predictions=predictions, references=eval_pred.label_ids)
config = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path,
num_labels=len(labels),
label2id=label2id,
id2label=id2label,
finetuning_task="audio-classification",
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
raw_datasets["train"] = (
raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = (
raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=raw_datasets["train"] if training_args.do_train else None,
eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
compute_metrics=compute_metrics,
processing_class=feature_extractor,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| ModelArguments |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py | {
"start": 1209,
"end": 2965
} | class ____(BaseFakeCallbackHandler):
"""Base fake callback handler mixin for testing."""
def on_llm_start_common(self) -> None:
self.llm_starts += 1
self.starts += 1
def on_llm_end_common(self) -> None:
self.llm_ends += 1
self.ends += 1
def on_llm_error_common(self) -> None:
self.errors += 1
def on_llm_new_token_common(self) -> None:
self.llm_streams += 1
def on_retry_common(self) -> None:
self.retries += 1
def on_chain_start_common(self) -> None:
self.chain_starts += 1
self.starts += 1
def on_chain_end_common(self) -> None:
self.chain_ends += 1
self.ends += 1
def on_chain_error_common(self) -> None:
self.errors += 1
def on_tool_start_common(self) -> None:
self.tool_starts += 1
self.starts += 1
def on_tool_end_common(self) -> None:
self.tool_ends += 1
self.ends += 1
def on_tool_error_common(self) -> None:
self.errors += 1
def on_agent_action_common(self) -> None:
self.agent_actions += 1
self.starts += 1
def on_agent_finish_common(self) -> None:
self.agent_ends += 1
self.ends += 1
def on_chat_model_start_common(self) -> None:
self.chat_model_starts += 1
self.starts += 1
def on_text_common(self) -> None:
self.text += 1
def on_retriever_start_common(self) -> None:
self.starts += 1
self.retriever_starts += 1
def on_retriever_end_common(self) -> None:
self.ends += 1
self.retriever_ends += 1
def on_retriever_error_common(self) -> None:
self.errors += 1
self.retriever_errors += 1
| BaseFakeCallbackHandlerMixin |
python | django__django | django/core/serializers/xml_serializer.py | {
"start": 17128,
"end": 17245
} | class ____(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
| DefusedXmlException |
python | doocs__leetcode | lcof2/剑指 Offer II 074. 合并区间/Solution.py | {
"start": 0,
"end": 379
} | class ____:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals.sort()
ans = []
st, ed = intervals[0]
for s, e in intervals[1:]:
if ed < s:
ans.append([st, ed])
st, ed = s, e
else:
ed = max(ed, e)
ans.append([st, ed])
return ans
| Solution |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 3851,
"end": 4041
} | class ____(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
| AsyncPrepareHandler |
python | python-pillow__Pillow | src/PIL/BlpImagePlugin.py | {
"start": 9344,
"end": 10931
} | class ____(abc.ABC, ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
try:
self._read_header()
self._load()
except struct.error as e:
msg = "Truncated BLP file"
raise OSError(msg) from e
return -1, 0
@abc.abstractmethod
def _load(self) -> None:
pass
def _read_header(self) -> None:
self._offsets = struct.unpack("<16I", self._safe_read(16 * 4))
self._lengths = struct.unpack("<16I", self._safe_read(16 * 4))
def _safe_read(self, length: int) -> bytes:
assert self.fd is not None
return ImageFile._safe_read(self.fd, length)
def _read_palette(self) -> list[tuple[int, int, int, int]]:
ret = []
for i in range(256):
try:
b, g, r, a = struct.unpack("<4B", self._safe_read(4))
except struct.error:
break
ret.append((b, g, r, a))
return ret
def _read_bgra(
self, palette: list[tuple[int, int, int, int]], alpha: bool
) -> bytearray:
data = bytearray()
_data = BytesIO(self._safe_read(self._lengths[0]))
while True:
try:
(offset,) = struct.unpack("<B", _data.read(1))
except struct.error:
break
b, g, r, a = palette[offset]
d: tuple[int, ...] = (r, g, b)
if alpha:
d += (a,)
data.extend(d)
return data
| _BLPBaseDecoder |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 2883,
"end": 3196
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("ko_KR")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert isinstance(day, str)
def test_month(self):
month = self.fake.month()
assert isinstance(month, str)
| TestKoKR |
python | walkccc__LeetCode | solutions/828. Count Unique Characters of All Substrings of a Given String/828.py | {
"start": 0,
"end": 534
} | class ____:
def uniqueLetterString(self, s: str) -> int:
ans = 0
# the number of unique letters in all the substrings ending in the index so
# far
dp = 0
lastCount = {}
lastSeen = {}
for i, c in enumerate(s):
newCount = i - lastSeen.get(c, -1)
# Substract the duplicates.
dp -= lastCount.get(c, 0)
# Add count of s[lastSeen[c] + 1..i], s[lastSeen[c] + 2..i], ..., s[i].
dp += newCount
lastCount[c] = newCount
lastSeen[c] = i
ans += dp
return ans
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/array.py | {
"start": 2969,
"end": 7708
} | class ____(expression.ExpressionClauseList[_T]):
"""A PostgreSQL ARRAY literal.
This is used to produce ARRAY literals in SQL expressions, e.g.::
from sqlalchemy.dialects.postgresql import array
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select(array([1, 2]) + array([3, 4, 5]))
print(stmt.compile(dialect=postgresql.dialect()))
Produces the SQL:
.. sourcecode:: sql
SELECT ARRAY[%(param_1)s, %(param_2)s] ||
ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1
An instance of :class:`.array` will always have the datatype
:class:`_types.ARRAY`. The "inner" type of the array is inferred from the
values present, unless the :paramref:`_postgresql.array.type_` keyword
argument is passed::
array(["foo", "bar"], type_=CHAR)
When constructing an empty array, the :paramref:`_postgresql.array.type_`
argument is particularly important as PostgreSQL server typically requires
a cast to be rendered for the inner type in order to render an empty array.
SQLAlchemy's compilation for the empty array will produce this cast so
that::
stmt = array([], type_=Integer)
print(stmt.compile(dialect=postgresql.dialect()))
Produces:
.. sourcecode:: sql
ARRAY[]::INTEGER[]
As required by PostgreSQL for empty arrays.
.. versionadded:: 2.0.40 added support to render empty PostgreSQL array
literals with a required cast.
Multidimensional arrays are produced by nesting :class:`.array` constructs.
The dimensionality of the final :class:`_types.ARRAY`
type is calculated by
recursively adding the dimensions of the inner :class:`_types.ARRAY`
type::
stmt = select(
array(
[array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
)
)
print(stmt.compile(dialect=postgresql.dialect()))
Produces:
.. sourcecode:: sql
SELECT ARRAY[
ARRAY[%(param_1)s, %(param_2)s],
ARRAY[%(param_3)s, %(param_4)s],
ARRAY[q, x]
] AS anon_1
.. seealso::
:class:`_postgresql.ARRAY`
""" # noqa: E501
__visit_name__ = "array"
stringify_dialect = "postgresql"
_traverse_internals: _TraverseInternalsType = [
("clauses", InternalTraversal.dp_clauseelement_tuple),
("type", InternalTraversal.dp_type),
]
def __init__(
self,
clauses: Iterable[_T],
*,
type_: Optional[_TypeEngineArgument[_T]] = None,
**kw: typing_Any,
):
r"""Construct an ARRAY literal.
:param clauses: iterable, such as a list, containing elements to be
rendered in the array
:param type\_: optional type. If omitted, the type is inferred
from the contents of the array.
"""
super().__init__(operators.comma_op, *clauses, **kw)
main_type = (
type_
if type_ is not None
else self.clauses[0].type if self.clauses else sqltypes.NULLTYPE
)
if isinstance(main_type, ARRAY):
self.type = ARRAY(
main_type.item_type,
dimensions=(
main_type.dimensions + 1
if main_type.dimensions is not None
else 2
),
) # type: ignore[assignment]
else:
self.type = ARRAY(main_type) # type: ignore[assignment]
@property
def _select_iterable(self) -> _SelectIterable:
return (self,)
def _bind_param(
self,
operator: OperatorType,
obj: typing_Any,
type_: Optional[TypeEngine[_T]] = None,
_assume_scalar: bool = False,
) -> BindParameter[_T]:
if _assume_scalar or operator is operators.getitem:
return expression.BindParameter(
None,
obj,
_compared_to_operator=operator,
type_=type_,
_compared_to_type=self.type,
unique=True,
)
else:
return array(
[
self._bind_param(
operator, o, _assume_scalar=True, type_=type_
)
for o in obj
]
) # type: ignore[return-value]
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[_T]]:
if against in (operators.any_op, operators.all_op, operators.getitem):
return expression.Grouping(self)
else:
return self
| array |
python | getsentry__sentry | tests/sentry/hybridcloud/test_organizationmembermapping.py | {
"start": 6759,
"end": 9573
} | class ____(TransactionTestCase, HybridCloudTestMixin):
def test_process_organization_member_update_receiver(self) -> None:
inviter = self.create_user("foo@example.com")
assert OrganizationMember.objects.all().count() == 0
with assume_test_silo_mode(SiloMode.CONTROL):
assert OrganizationMemberMapping.objects.all().count() == 0
fields = {
"organization_id": self.organization.id,
"role": "member",
"email": "mail@testserver.com",
"inviter_id": inviter.id,
"invite_status": InviteStatus.REQUESTED_TO_JOIN.value,
}
# Creation step of receiver
org_member = OrganizationMember.objects.create(**fields)
assert OrganizationMember.objects.all().count() == 2
with assume_test_silo_mode(SiloMode.CONTROL):
# rows are created for owner, and invited member.
assert OrganizationMemberMapping.objects.all().count() == 2
for org_member in OrganizationMember.objects.all().iterator():
self.assert_org_member_mapping(org_member=org_member)
# Update step of receiver
org_member.update(role="owner")
assert OrganizationMember.objects.all().count() == 2
with assume_test_silo_mode(SiloMode.CONTROL):
assert OrganizationMemberMapping.objects.all().count() == 2
for org_member in OrganizationMember.objects.all().iterator():
self.assert_org_member_mapping(org_member=org_member)
def test_process_organization_member_deletes_receiver(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
inviter = self.create_user("foo@example.com")
assert OrganizationMemberMapping.objects.all().count() == 0
fields = {
"organization_id": self.organization.id,
"role": "member",
"email": "mail@testserver.com",
"inviter_id": inviter.id,
"invite_status": InviteStatus.REQUESTED_TO_JOIN.value,
}
org_member = OrganizationMember.objects.create(**fields)
assert OrganizationMember.objects.all().count() == 2
with assume_test_silo_mode(SiloMode.CONTROL):
# rows are created for owner, and invited member.
assert OrganizationMemberMapping.objects.all().count() == 2
for om in OrganizationMember.objects.all().iterator():
self.assert_org_member_mapping(org_member=om)
with outbox_runner():
org_member.delete()
assert OrganizationMember.objects.all().count() == 1
with assume_test_silo_mode(SiloMode.CONTROL):
assert OrganizationMemberMapping.objects.all().count() == 1
self.assert_org_member_mapping_not_exists(org_member=org_member)
| ReceiverTest |
python | bokeh__bokeh | src/bokeh/core/property_mixins.py | {
"start": 8994,
"end": 9708
} | class ____(HasProps):
''' Properties relevant to rendering path operations.
Mirrors the BokehJS ``properties.LineVector`` class.
'''
line_color = ColorSpec(default="black", help=_color_help % "stroke paths")
line_alpha = AlphaSpec(help=_alpha_help % "stroke paths")
line_width = FloatSpec(default=1, help=_line_width_help)
line_join = LineJoinSpec(default="bevel", help=_line_join_help)
line_cap = LineCapSpec(default="butt", help=_line_cap_help)
line_dash = DashPatternSpec(default=[], help="""How should the line be dashed.""")
line_dash_offset = IntSpec(default=0, help="""The distance into the ``line_dash`` (in pixels) that the pattern should start from.""")
| LineProps |
python | dagster-io__dagster | python_modules/automation/python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_comprehensive.py | {
"start": 10845,
"end": 25092
} | class ____:
"""Comprehensive tests for ai-review-summarize targeting 80% coverage."""
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_basic_json_output(self, mock_diff):
"""Test basic JSON output with confidence above threshold."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.8
mock_diff.return_value = mock_summary
with patch(
"automation.dagster_dev.commands.ai_review_summarize.format_summary_for_ai"
) as mock_format:
mock_format.return_value = {"summary": "test"}
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "json"])
assert result.exit_code == 0
mock_diff.assert_called_once()
mock_format.assert_called_once()
@patch("automation.dagster_dev.commands.ai_review_summarize.format_summary_for_ai")
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_low_confidence_json_output(self, mock_diff, mock_format):
"""Test JSON output with low confidence warning."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.5
mock_diff.return_value = mock_summary
mock_format.return_value = {"summary": "test"}
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(
ai_review_summarize, ["--confidence-threshold", "0.8", "--format", "json"]
)
assert result.exit_code == 0
assert "⚠️ Summary confidence" in result.output
mock_format.assert_called_once()
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_low_confidence_human_output(self, mock_diff):
"""Test human output with low confidence warning."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.4
mock_summary.change_category = Mock()
mock_summary.change_category.value = "bugfix"
mock_summary.files_changed = 2
mock_summary.additions = 10
mock_summary.deletions = 5
mock_summary.functions = []
mock_summary.classes = []
mock_summary.imports = []
mock_summary.api_changes = []
mock_summary.key_implementation_details = ""
mock_summary.needs_detailed_review = False
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(
ai_review_summarize, ["--format", "human", "--confidence-threshold", "0.7"]
)
assert result.exit_code == 0
assert "🔍 Analyzing changes in range: master..HEAD" in result.output
assert "⚠️ Summary confidence" in result.output
assert "📋 Change Summary" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_human_format_complete_display(self, mock_diff):
"""Test human format with complete summary display."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.9
mock_summary.change_category = Mock()
mock_summary.change_category.value = "feature"
mock_summary.files_changed = 5
mock_summary.additions = 100
mock_summary.deletions = 20
mock_summary.functions = [Mock(details="add_function()"), Mock(details="modify_function()")]
mock_summary.classes = [Mock(details="NewClass")]
mock_summary.imports = [Mock(details="import module")]
mock_summary.api_changes = ["Added new API endpoint"]
mock_summary.key_implementation_details = "Key implementation details here"
mock_summary.needs_detailed_review = False
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
assert "📋 Change Summary" in result.output
assert "Category: feature" in result.output
assert "Scope: 5 files, +100/-20 lines" in result.output
assert "🔧 Function Changes:" in result.output
assert "📦 Class Changes:" in result.output
assert "📥 Import Changes:" in result.output
assert "🔗 API Impact:" in result.output
assert "💡 Key Implementation Details:" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_needs_detailed_review_warning(self, mock_diff):
"""Test detailed review warning display."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.9
mock_summary.change_category = Mock()
mock_summary.change_category.value = "refactor"
mock_summary.files_changed = 50
mock_summary.additions = 1000
mock_summary.deletions = 500
mock_summary.functions = []
mock_summary.classes = []
mock_summary.imports = []
mock_summary.api_changes = []
mock_summary.key_implementation_details = ""
mock_summary.needs_detailed_review = True
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
assert "⚠️ Recommendation: Large change - consider full diff review" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_value_error_handling(self, mock_diff):
"""Test ValueError handling."""
mock_diff.side_effect = ValueError("Invalid diff range")
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, [])
assert result.exit_code == 1
assert "❌ Error analyzing diff: Invalid diff range" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_unexpected_exception_handling(self, mock_diff):
"""Test unexpected exception handling."""
mock_diff.side_effect = RuntimeError("Unexpected error")
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, [])
assert result.exit_code == 1
assert "❌ Unexpected error: Unexpected error" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_custom_diff_range(self, mock_diff):
"""Test custom diff range parameter."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.8
mock_diff.return_value = mock_summary
with patch(
"automation.dagster_dev.commands.ai_review_summarize.format_summary_for_ai"
) as mock_format:
mock_format.return_value = {"summary": "test"}
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--diff-range", "HEAD~3..HEAD"])
assert result.exit_code == 0
assert "🔍 Analyzing changes in range: HEAD~3..HEAD" in result.output
mock_diff.assert_called_once_with("HEAD~3..HEAD")
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_truncated_details_display(self, mock_diff):
"""Test truncation of long implementation details."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.9
mock_summary.change_category = Mock()
mock_summary.change_category.value = "feature"
mock_summary.files_changed = 1
mock_summary.additions = 10
mock_summary.deletions = 5
mock_summary.functions = []
mock_summary.classes = []
mock_summary.imports = []
mock_summary.api_changes = []
# Long implementation details that should be truncated
long_details = "\n".join([f"Detail line {i}" for i in range(15)])
mock_summary.key_implementation_details = long_details
mock_summary.needs_detailed_review = False
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
assert "... (truncated)" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_empty_sections_not_displayed(self, mock_diff):
"""Test that empty sections are not displayed in human format."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.9
mock_summary.change_category = Mock()
mock_summary.change_category.value = "bugfix"
mock_summary.files_changed = 1
mock_summary.additions = 5
mock_summary.deletions = 2
# Empty lists - these sections should not be displayed
mock_summary.functions = []
mock_summary.classes = []
mock_summary.imports = []
mock_summary.api_changes = []
mock_summary.key_implementation_details = ""
mock_summary.needs_detailed_review = False
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
assert "📋 Change Summary" in result.output
assert "Category: bugfix" in result.output
# These sections should not appear when empty
assert "🔧 Function Changes:" not in result.output
assert "📦 Class Changes:" not in result.output
assert "📥 Import Changes:" not in result.output
assert "🔗 API Impact:" not in result.output
assert "💡 Key Implementation Details:" not in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_format_summary_for_ai_exception(self, mock_diff):
"""Test exception handling in format_summary_for_ai."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.8
mock_diff.return_value = mock_summary
with patch(
"automation.dagster_dev.commands.ai_review_summarize.format_summary_for_ai"
) as mock_format:
mock_format.side_effect = RuntimeError("Format error")
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "json"])
assert result.exit_code == 1
assert "❌ Unexpected error: Format error" in result.output
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_default_parameters(self, mock_diff):
"""Test command with default parameters (format=json, confidence-threshold=0.7)."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.8
mock_diff.return_value = mock_summary
with patch(
"automation.dagster_dev.commands.ai_review_summarize.format_summary_for_ai"
) as mock_format:
mock_format.return_value = {"summary": "test"}
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, [])
assert result.exit_code == 0
assert "🔍 Analyzing changes in range: master..HEAD" in result.output
# Should not show low confidence warning since 0.8 > 0.7 (default threshold)
assert "⚠️ Summary confidence" not in result.output
mock_diff.assert_called_once_with("master..HEAD")
@patch("automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary")
def test_multiple_function_changes(self, mock_diff):
"""Test display of multiple function changes."""
mock_summary = Mock()
mock_summary.summary_confidence = 0.9
mock_summary.change_category = Mock()
mock_summary.change_category.value = "refactor"
mock_summary.files_changed = 3
mock_summary.additions = 50
mock_summary.deletions = 30
# Multiple function changes
mock_fn1 = Mock()
mock_fn1.details = "Added validate_input() function"
mock_fn2 = Mock()
mock_fn2.details = "Modified process_data() to handle edge cases"
mock_fn3 = Mock()
mock_fn3.details = "Removed deprecated_function()"
mock_summary.functions = [mock_fn1, mock_fn2, mock_fn3]
mock_summary.classes = []
mock_summary.imports = []
mock_summary.api_changes = []
mock_summary.key_implementation_details = "Improved data processing pipeline"
mock_summary.needs_detailed_review = False
mock_diff.return_value = mock_summary
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
assert "🔧 Function Changes:" in result.output
assert "Added validate_input() function" in result.output
assert "Modified process_data() to handle edge cases" in result.output
assert "Removed deprecated_function()" in result.output
| TestAiReviewSummarizeComprehensive |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor23.py | {
"start": 323,
"end": 417
} | class ____(Generic[V]):
pass
ParseFn = Callable[[Sequence[T], int, int], Result[V]]
| Result |
python | getsentry__sentry | src/sentry/utils/kvstore/bigtable.py | {
"start": 755,
"end": 13389
} | class ____(KVStorage[str, bytes]):
column_family = "x"
# The data column contains a bytearray up that may be up to ``max_size``
# bytes. The value may be compressed or otherwise encoded based on the
# value of the ``flags`` column.
data_column = b"0"
max_size = 1024 * 1024 * 10
# The TTL column contains an integer that represents the original TTL
# (default or row-level) a row was written with in seconds. (The original
# write timestamp can be calculated by taking the cell timestamp and
# subtracting this value.) If this column is not present, the row was
# written without a TTL, and all other columns (data, and optionally flags)
# will have a cell timestamp that corresponds to when the write occurred.
ttl_column = b"t"
ttl_struct = struct.Struct("<I")
# The flags column contains a single byte that represents a bit set. The
# structure of the bit set is defined in ``Flags``. If this column is not
# present in the row returned by Bigtable, its value is assumed to be 0.
flags_column = b"f"
flags_struct = struct.Struct("B")
class Flags(enum.IntFlag):
# XXX: Compression flags are assumed to be mutually exclusive, the
# behavior is explicitly undefined if both bits are set on a record.
COMPRESSED_ZLIB = 1 << 0
COMPRESSED_ZSTD = 1 << 1
compression_strategies: Mapping[str, tuple[Flags, Codec[bytes, bytes]]] = {
"zlib": (Flags.COMPRESSED_ZLIB, ZlibCodec()),
"zstd": (Flags.COMPRESSED_ZSTD, ZstdCodec()),
}
def __init__(
self,
instance: str,
table_name: str,
project: str | None = None,
client_options: Mapping[Any, Any] | None = None,
default_ttl: timedelta | None = None,
compression: str | None = None,
app_profile: str | None = None,
) -> None:
client_options = client_options if client_options is not None else {}
if "admin" in client_options:
raise ValueError('"admin" cannot be provided as a client option')
if compression is not None and compression not in self.compression_strategies:
raise ValueError(f'"compression" must be one of {self.compression_strategies.keys()!r}')
self.project = project
self.instance = instance
self.table_name = table_name
self.client_options = client_options
self.default_ttl = default_ttl
self.compression = compression
self.app_profile = app_profile
self.__table: Table
self.__table_lock = Lock()
def _get_table(self, admin: bool = False) -> Table:
if admin is True:
return (
bigtable.Client(project=self.project, admin=True, **self.client_options)
.instance(self.instance)
.table(self.table_name, app_profile_id=self.app_profile)
)
try:
# Fast check for an existing table
return self.__table
except AttributeError:
# If missing, we acquire our lock to initialize a new one
with self.__table_lock:
# It's possible that the lock was blocked waiting on someone
# else who already initialized, so we first check again to make
# sure this isn't the case.
try:
table = self.__table
except AttributeError:
table = self.__table = (
bigtable.Client(project=self.project, **self.client_options)
.instance(self.instance)
.table(self.table_name, app_profile_id=self.app_profile)
)
return table
def get(self, key: str) -> bytes | None:
with sentry_sdk.start_span(op="bigtable.get"):
# Default timeout is 60 seconds, much too long for our ingestion pipeline
# Modify retry based on https://cloud.google.com/python/docs/reference/storage/latest/retry_timeout#configuring-retries
modified_retry = DEFAULT_RETRY_READ_ROWS.with_timeout(5.0)
row = self._get_table().read_row(key, retry=modified_retry)
if row is None:
return None
return self.__decode_row(row)
def get_many(self, keys: Sequence[str]) -> Iterator[tuple[str, bytes]]:
if not keys:
# This is probably unintentional, and the behavior isn't specified.
logging.warning("get_many called with empty keys sequence")
return
rows = RowSet()
for key in keys:
rows.add_row_key(key)
for row in self._get_table().read_rows(row_set=rows):
value = self.__decode_row(row)
# Even though Bigtable in't going to return empty rows, an empty
# value may be returned by ``__decode_row`` if the row has
# outlived its TTL, so we need to check its value here.
if value is not None:
yield row.row_key.decode("utf-8"), value
def __decode_row(self, row: PartialRowData) -> bytes | None:
columns = row.cells[self.column_family]
try:
cell = columns[self.data_column][0]
except KeyError:
logger.warning("Retrieved row (%r) which does not contain a data column!", row.row_key)
return None
# Check if a TTL column exists for this row. If there is, we can use
# the `timestamp` property of the cells to see if we should return the
# row or not.
if self.ttl_column in columns:
# NOTE: If you need the value, it can be unpacked with the struct.
if cell.timestamp < timezone.now():
return None
value = cell.value
if self.flags_column in columns:
flags = self.Flags(self.flags_struct.unpack(columns[self.flags_column][0].value)[0])
# Check if there is a compression flag set, if so decompress the value.
# XXX: If no compression flags are matched, we unfortunately can't
# tell the difference between data written with a compression
# strategy that we're not aware of and data that was not compressed
# at all, so we just return the data and hope for the best. It is
# also possible that multiple compression flags match. We just stop
# after the first one matches. Good luck!
for compression_flag, strategy in self.compression_strategies.values():
if compression_flag in flags:
value = strategy.decode(value)
break
return value
def set(self, key: str, value: bytes, ttl: timedelta | None = None) -> None:
try:
return self._set(key, value, ttl)
except (exceptions.InternalServerError, exceptions.ServiceUnavailable):
# Delete cached client before retry
with self.__table_lock:
del self.__table
# Retry once on InternalServerError or ServiceUnavailable
# 500 Received RST_STREAM with error code 2
# SENTRY-S6D
return self._set(key, value, ttl)
def _set(self, key: str, value: bytes, ttl: timedelta | None = None) -> None:
# XXX: There is a type mismatch here -- ``direct_row`` expects
# ``bytes`` but we are providing it with ``str``.
row = self._get_table().direct_row(key)
# Call to delete is just a state mutation, and in this case is just
# used to clear all columns so the entire row will be replaced.
# Otherwise, if an existing row were mutated, and it took up more than
# one column, it'd be possible to overwrite beginning columns and still
# retain the end ones. This also ensures old data is collected during
# garbage collection, as well ensuring that TTL mutation is respected,
# particularly if the TTL is reduced (if the prior state was retained,
# cell values would persist using the longest TTL, not the most
# recently set TTL.)
row.delete()
# If we are setting a TTL on this row, we want to set the timestamp of
# the cells into the future. This allows our GC policy to delete them
# when the time comes. It also allows us to filter the rows on read if
# we are past the timestamp to not return. We want to set a ttl column
# to the ttl value in the future if we wanted to bump the timestamp and
# rewrite a row with a new ttl.
ttl = ttl or self.default_ttl
if ttl is None:
# XXX: If ``automatic_expiry`` is enabled and no TTL (default or
# row-level TTL) is provided, this will default to the Bigtable
# server timestamp and this row will be immediately evicted per the
# garbage collection policy.
ts = None
else:
ts = timezone.now() + ttl
row.set_cell(
self.column_family,
self.ttl_column,
self.ttl_struct.pack(int(ttl.total_seconds())),
timestamp=ts,
)
# Track flags for metadata about this row. This only flag we're
# tracking now is whether compression is on or not for the data column.
flags = self.Flags(0)
if self.compression:
compression_flag, strategy = self.compression_strategies[self.compression]
flags |= compression_flag
value = strategy.encode(value)
metrics.distribution(
"storage.put.size",
len(value),
tags={"usecase": "nodestore", "compression": self.compression},
unit="byte",
)
# Only need to write the column at all if any flags are enabled. And if
# so, pack it into a single byte.
if flags:
row.set_cell(
self.column_family,
self.flags_column,
self.flags_struct.pack(flags),
timestamp=ts,
)
assert len(value) <= self.max_size
row.set_cell(self.column_family, self.data_column, value, timestamp=ts)
status = row.commit()
if status.code != 0:
raise BigtableError(status.code, status.message)
def delete(self, key: str) -> None:
# XXX: There is a type mismatch here -- ``direct_row`` expects
# ``bytes`` but we are providing it with ``str``.
row = self._get_table().direct_row(key)
row.delete()
status = row.commit()
if status.code != 0:
raise BigtableError(status.code, status.message)
def delete_many(self, keys: Sequence[str]) -> None:
table = self._get_table()
rows = []
for key in keys:
# XXX: There is a type mismatch here -- ``direct_row`` expects
# ``bytes`` but we are providing it with ``str``.
row = table.direct_row(key)
row.delete()
rows.append(row)
errors = []
for status in table.mutate_rows(rows):
if status.code != 0:
errors.append(BigtableError(status.code, status.message))
if errors:
raise BigtableError(errors)
def bootstrap(self, automatic_expiry: bool = True) -> None:
table = self._get_table(admin=True)
if table.exists():
return
# With automatic expiry, we set a GC rule to automatically delete rows
# with an age of 0. This sounds odd, but when we write rows, we write
# them with a future timestamp as long as a TTL is set during write. By
# doing this, we are effectively writing rows into the future, and they
# will be deleted due to TTL when their timestamp is passed.
if automatic_expiry:
# NOTE: Bigtable can't actually use 0 TTL, and
# requires a minimum value of 1ms.
# > InvalidArgument desc = Error in field 'Modifications list' : Error in element #0 : max_age must be at least one millisecond
delta = timedelta(milliseconds=1)
gc_rule = bigtable.column_family.MaxAgeGCRule(delta)
else:
gc_rule = None
retry_504 = retry.Retry(retry.if_exception_type(exceptions.DeadlineExceeded))
retry_504(table.create)(column_families={self.column_family: gc_rule})
def destroy(self) -> None:
table = self._get_table(admin=True)
if not table.exists():
return
table.delete()
| BigtableKVStorage |
python | ray-project__ray | python/ray/data/_internal/execution/bundle_queue/fifo_bundle_queue.py | {
"start": 269,
"end": 377
} | class ____:
value: "RefBundle"
next: Optional["_Node"] = None
prev: Optional["_Node"] = None
| _Node |
python | openai__openai-python | src/openai/lib/_old_api.py | {
"start": 814,
"end": 1947
} | class ____(LazyProxy[Any]):
def __init__(self, *, symbol: str) -> None:
super().__init__()
self._symbol = symbol
@override
def __load__(self) -> Any:
# return the proxy until it is eventually called so that
# we don't break people that are just checking the attributes
# of a module
return self
def __call__(self, *_args: Any, **_kwargs: Any) -> Any:
raise APIRemovedInV1(symbol=self._symbol)
SYMBOLS = [
"Edit",
"File",
"Audio",
"Image",
"Model",
"Engine",
"Customer",
"FineTune",
"Embedding",
"Completion",
"Deployment",
"Moderation",
"ErrorObject",
"FineTuningJob",
"ChatCompletion",
]
# we explicitly tell type checkers that nothing is exported
# from this file so that when we re-export the old symbols
# in `openai/__init__.py` they aren't added to the auto-complete
# suggestions given by editors
if TYPE_CHECKING:
__all__: list[str] = []
else:
__all__ = SYMBOLS
__locals = locals()
for symbol in SYMBOLS:
__locals[symbol] = APIRemovedInV1Proxy(symbol=symbol)
| APIRemovedInV1Proxy |
python | ApeWorX__ape | src/ape/contracts/base.py | {
"start": 17720,
"end": 34742
} | class ____(BaseInterfaceModel):
"""
The types of events on a :class:`~ape.contracts.base.ContractInstance`.
Use the event types via ``.`` access on the contract instances.
Usage example::
# 'my_contract' refers to a ContractInstance in this case.
my_event_type = my_contract.MyEvent
"""
contract: "ContractTypeWrapper"
abi: EventABI
_logs: Optional[list[ContractLog]] = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Inject the doc-str using the NatSpec to better integrate with IPython.
# NOTE: This must happen AFTER super().__init__().
self.__doc__ = self.info
@log_instead_of_fail(default="<ContractEvent>")
def __repr__(self) -> str:
return self.abi.signature
@log_instead_of_fail()
def _repr_pretty_(self, printer, cycle):
console = get_rich_console()
console.print(self._get_info(enrich=True))
@property
def name(self) -> str:
"""
The name of the contract event, as defined in the contract.
"""
return self.abi.name
@property
def info(self) -> str:
"""
NatSpec info derived from the contract-type developer-documentation.
"""
return self._get_info()
def _get_info(self, enrich: bool = False) -> str:
info_str = self.abi.signature
if spec := self.contract.contract_type.natspecs.get(self.abi.selector):
spec_indented = spec.replace("\n", "\n ")
info_str = f"{info_str}\n {spec_indented}"
return _enrich_natspec(info_str) if enrich else info_str
def __iter__(self) -> Iterator[ContractLog]: # type: ignore[override]
"""
Get all logs that have occurred for this event.
"""
yield from self.range(self.chain_manager.blocks.height + 1)
@property
def log_filter(self) -> LogFilter:
# NOTE: This shouldn't really be called when given contract containers.
address = getattr(self.contract, "address", None)
addresses = [] if not address else [address]
return LogFilter.from_event(event=self.abi, addresses=addresses, start_block=0)
@singledispatchmethod
def __getitem__(self, value) -> Union[ContractLog, list[ContractLog]]: # type: ignore[override]
raise NotImplementedError(f"Cannot use '{type(value)}' to access logs.")
@__getitem__.register
def __getitem_int(self, index: int) -> ContractLog:
"""
Access events on the contract by the index of when they occurred.
Args:
index (int): The index such that ``0`` is the first log to have occurred
and ``-1`` is the last.
Returns:
:class:`~ape.contracts.base.ContractLog`
"""
logs = self.provider.get_contract_logs(self.log_filter)
try:
if index == 0:
return next(logs)
elif index > 0:
return next(islice(logs, index, index + 1))
else:
return list(logs)[index]
except (IndexError, StopIteration) as err:
raise IndexError(f"No log at index '{index}' for event '{self.abi.name}'.") from err
@__getitem__.register
def __getitem_slice(self, value: slice) -> list[ContractLog]:
"""
Access a slice of logs from this event.
Args:
value (slice): The range of log to get, e.g. ``[5:10]``.
Returns:
Iterator[:class:`~ape.contracts.base.ContractLog`]
"""
logs = self.provider.get_contract_logs(self.log_filter)
return list(islice(logs, value.start, value.stop, value.step))
def __len__(self):
logs = self.provider.get_contract_logs(self.log_filter)
return sum(1 for _ in logs)
def __call__(self, *args: Any, **kwargs: Any) -> MockContractLog:
"""
Create a mock-instance of a log using this event ABI and the contract address.
Args:
*args: Positional arguments for the event.
**kwargs: Key-word arguments for the event.
Returns:
:class:`~ape.types.events.MockContractLog`
"""
# Create a dictionary from the positional arguments
event_args: dict[Any, Any] = dict(zip((ipt.name for ipt in self.abi.inputs), args))
if overlapping_keys := set(event_args).intersection(kwargs):
raise ValueError(
f"Overlapping keys found in arguments: '{', '.join(overlapping_keys)}'."
)
# Update event_args with keyword arguments
event_args.update(kwargs)
# Check that event_args.keys() is a subset of the expected input names
keys_given = set(event_args.keys())
keys_expected = {ipt.name for ipt in self.abi.inputs}
if unknown_input_names := keys_given - keys_expected:
message = "Unknown keys: "
sections = []
for unknown in unknown_input_names:
if matches := difflib.get_close_matches(unknown, keys_expected, n=1, cutoff=0.5):
matches_str = ", ".join(matches)
sections.append(f"{unknown} (did you mean: '{matches_str}'?)")
else:
sections.append(unknown)
message = f"{message} '{', '.join(sections)}'"
raise ValueError(message)
# Convert the arguments using the conversion manager
converted_args = {}
ecosystem = self.provider.network.ecosystem
parser = StructParser(self.abi)
for key, value in event_args.items():
if value is None:
continue
input_abi = next(ipt for ipt in self.abi.inputs if ipt.name == key)
py_type = ecosystem.get_python_types(input_abi)
if isinstance(value, dict):
ls_values = list(value.values())
converted_values = self.conversion_manager.convert(ls_values, py_type)
converted_args[key] = parser.decode_input([converted_values])
elif isinstance(value, (list, tuple)):
converted_args[key] = parser.decode_input(value)
else:
converted_args[key] = self.conversion_manager.convert(value, py_type)
properties: dict = {
"event_arguments": converted_args,
"event_name": self.abi.name,
}
if hasattr(self.contract, "address"):
# Only address if this is off an instance.
properties["contract_address"] = self.contract.address
return MockContractLog(**properties)
def query(
self,
*columns: str,
start_block: int = 0,
stop_block: Optional[int] = None,
step: int = 1,
engine_to_use: Optional[str] = None,
) -> "DataFrame":
"""
Iterate through blocks for log events
Args:
*columns (str): ``*``-based argument for columns in the DataFrame to
return.
start_block (int): The first block, by number, to include in the
query. Defaults to ``0``.
stop_block (Optional[int]): The last block, by number, to include
in the query. Defaults to the latest block.
step (int): The number of blocks to iterate between block numbers.
Defaults to ``1``.
engine_to_use (Optional[str]): query engine to use, bypasses query
engine selection algorithm.
Returns:
pd.DataFrame
"""
# perf: pandas import is really slow. Avoid importing at module level.
import pandas as pd
HEAD = self.chain_manager.blocks.height
if start_block < 0:
start_block = HEAD + start_block
if stop_block is None:
stop_block = HEAD
elif stop_block < 0:
stop_block = HEAD + stop_block
elif stop_block > HEAD:
raise ChainError(
f"'stop={stop_block}' cannot be greater than the chain length ({HEAD})."
)
query: dict = {
"columns": (list(ContractLog.__pydantic_fields__) if columns[0] == "*" else columns),
"event": self.abi,
"start_block": start_block,
"stop_block": stop_block,
"step": step,
}
if hasattr(self.contract, "address"):
# Only query for a specific contract when checking an instance.
query["contract"] = self.contract.address
contract_event_query = ContractEventQuery(**query)
contract_events = self.query_manager.query(
contract_event_query, engine_to_use=engine_to_use
)
columns_ls = validate_and_expand_columns(columns, ContractLog)
data = map(partial(extract_fields, columns=columns_ls), contract_events)
return pd.DataFrame(columns=columns_ls, data=data)
def range(
self,
start_or_stop: int,
stop: Optional[int] = None,
search_topics: Optional[dict[str, Any]] = None,
extra_addresses: Optional[list] = None,
) -> Iterator[ContractLog]:
"""
Search through the logs for this event using the given filter parameters.
Args:
start_or_stop (int): When also given ``stop``, this is the earliest
block number in the desired log set.
Otherwise, it is the total amount of blocks to get starting from ``0``.
stop (Optional[int]): The latest block number in the
desired log set. Defaults to delegating to provider.
search_topics (Optional[dict]): Search topics, such as indexed event inputs,
to query by. Defaults to getting all events.
extra_addresses (Optional[list[:class:`~ape.types.address.AddressType`]]):
Additional contract addresses containing the same event type. Defaults to
only looking at the contract instance where this event is defined.
Returns:
Iterator[:class:`~ape.contracts.base.ContractLog`]
"""
if not (contract_address := getattr(self.contract, "address", None)):
return
start_block = None
stop_block = None
HEAD = self.chain_manager.blocks.height # Current block height
if stop is None:
contract = None
try:
contract = self.chain_manager.contracts.instance_at(contract_address)
except Exception:
pass
# Determine the start block from contract creation metadata
if contract and (creation := contract.creation_metadata):
start_block = creation.block
# Handle single parameter usage (like Python's range(stop))
if start_or_stop == 0:
# stop==0 is the same as stop==HEAD
# because of the -1 (turns to negative).
stop_block = HEAD + 1
elif start_or_stop >= 0:
# Given like range(1)
stop_block = min(start_or_stop - 1, HEAD)
else:
# Give like range(-1)
stop_block = HEAD + start_or_stop
elif start_or_stop is not None and stop is not None:
# Handle cases where both start and stop are provided
if start_or_stop >= 0:
start_block = min(start_or_stop, HEAD)
else:
# Negative start relative to HEAD
adjusted_value = HEAD + start_or_stop + 1
start_block = max(adjusted_value, 0)
if stop == 0:
# stop==0 is the same as stop==HEAD
# because of the -1 (turns to negative).
stop_block = HEAD
elif stop > 0:
# Positive stop, capped to the chain HEAD
stop_block = min(stop - 1, HEAD)
else:
# Negative stop.
adjusted_value = HEAD + stop
stop_block = max(adjusted_value, 0)
# Gather all addresses to query (contract and any extra ones provided)
addresses = list(set([contract_address] + (extra_addresses or [])))
# Construct the event query
contract_event_query = ContractEventQuery(
columns=list(ContractLog.__pydantic_fields__), # Ensure all necessary columns
contract=addresses,
event=self.abi,
search_topics=search_topics,
start_block=start_block or 0, # Default to block 0 if not set
stop_block=stop_block, # None means query to the current HEAD
)
# Execute the query and yield results
yield from self.query_manager.query(contract_event_query) # type: ignore
def from_receipt(self, receipt: "ReceiptAPI") -> list[ContractLog]:
"""
Get all the events from the given receipt.
Args:
receipt (:class:`~ape.api.transactions.ReceiptAPI`): The receipt containing the logs.
Returns:
list[:class:`~ape.contracts.base.ContractLog`]
"""
ecosystem = self.provider.network.ecosystem
# NOTE: Safe to use a list because a receipt should never have too many logs.
return list(
ecosystem.decode_logs(
# NOTE: Filter out logs that do not match this address (if address available)
# (okay to be empty list, since EcosystemAPI.decode_logs will return [])
[
log
for log in receipt.logs
if log["address"] == getattr(self.contract, "address", log["address"])
],
self.abi,
)
)
def poll_logs(
self,
start_block: Optional[int] = None,
stop_block: Optional[int] = None,
required_confirmations: Optional[int] = None,
new_block_timeout: Optional[int] = None,
search_topics: Optional[dict[str, Any]] = None,
**search_topic_kwargs: dict[str, Any],
) -> Iterator[ContractLog]:
"""
Poll new logs for this event. Can specify topic filters to use when setting up the filter.
Optionally set a start block to include historical blocks.
**NOTE**: This is a daemon method; it does not terminate unless an exception occurs.
Usage example::
for new_log in contract.MyEvent.poll_logs(arg=1):
print(f"New event log found: block_number={new_log.block_number}")
Args:
start_block (Optional[int]): The block number to start with. Defaults to the pending
block number.
stop_block (Optional[int]): Optionally set a future block number to stop at.
Defaults to never-ending.
required_confirmations (Optional[int]): The amount of confirmations to wait
before yielding the block. The more confirmations, the less likely a reorg will occur.
Defaults to the network's configured required confirmations.
new_block_timeout (Optional[int]): The amount of time to wait for a new block before
quitting. Defaults to 10 seconds for local networks or ``50 * block_time`` for live
networks.
search_topics (Optional[dict[str, Any]]): A dictionary of search topics to use when
constructing a polling filter. Overrides the value of `**search_topics_kwargs`.
search_topics_kwargs: Search topics to use when constructing a polling filter. Allows
easily specifying topic filters using kwarg syntax but can be used w/ `search_topics`
Returns:
Iterator[:class:`~ape.types.ContractLog`]
"""
if required_confirmations is None:
required_confirmations = self.provider.network.required_confirmations
# NOTE: We process historical blocks separately here to minimize rpc calls
height = max(self.chain_manager.blocks.height - required_confirmations, 0)
if start_block and height > 0 and start_block < height:
yield from self.range(start_block, height)
start_block = height + 1
if search_topics:
search_topic_kwargs.update(search_topics)
topics = encode_topics(self.abi, search_topic_kwargs)
if address := getattr(self.contract, "address", None):
# NOTE: Now we process the rest
yield from self.provider.poll_logs(
stop_block=stop_block,
address=address,
required_confirmations=required_confirmations,
new_block_timeout=new_block_timeout,
events=[self.abi],
topics=topics,
)
# TODO: In 0.9, just make `_events_` or ContractEvent possibly handle multiple ABIs
# much like the transactions handlers do. OR at least take the opportunity to refactor.
| ContractEvent |
python | walkccc__LeetCode | solutions/1207. Unique Number of Occurrences/1207.py | {
"start": 0,
"end": 264
} | class ____:
def uniqueOccurrences(self, arr: list[int]) -> bool:
count = collections.Counter(arr)
occurrences = set()
for value in count.values():
if value in occurrences:
return False
occurrences.add(value)
return True
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/utils/validators.py | {
"start": 918,
"end": 1459
} | class ____:
"""
Validates data is valid JSON.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
if field.data:
try:
json.loads(field.data)
except JSONDecodeError as ex:
message = self.message or f"JSON Validation Error: {ex}"
raise ValidationError(message=field.gettext(message.format(field.data)))
| ValidJson |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vision.py | {
"start": 41879,
"end": 45705
} | class ____(GoogleCloudBaseOperator):
"""
Delete a ReferenceImage ID resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteReferenceImageOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param reference_image_id: (Optional) A user-supplied resource id for the ReferenceImage to be added.
If set, the server will attempt to use this value as the resource id. If it is already in use, an
error is returned with code ALREADY_EXISTS. Must be at most 128 characters long. It cannot contain
the character `/`.
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_reference_image_create_template_fields]
template_fields: Sequence[str] = (
"location",
"product_id",
"reference_image_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_reference_image_create_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
reference_image_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.reference_image_id = reference_image_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_reference_image(
location=self.location,
product_id=self.product_id,
reference_image_id=self.reference_image_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| CloudVisionDeleteReferenceImageOperator |
python | scipy__scipy | benchmarks/benchmarks/optimize_qap.py | {
"start": 342,
"end": 2852
} | class ____(Benchmark):
methods = ['faq', '2opt']
probs = ["bur26a", "bur26b", "bur26c", "bur26d", "bur26e", "bur26f",
"bur26g", "bur26h", "chr12a", "chr12b", "chr12c", "chr15a",
"chr15b", "chr15c", "chr18a", "chr18b", "chr20a", "chr20b",
"chr20c", "chr22a", "chr22b", "chr25a",
"els19",
"esc16a", "esc16b", "esc16c", "esc16d", "esc16e", "esc16g",
"esc16h", "esc16i", "esc16j", "esc32e", "esc32g", "esc128",
"had12", "had14", "had16", "had18", "had20", "kra30a",
"kra30b", "kra32",
"lipa20a", "lipa20b", "lipa30a", "lipa30b", "lipa40a", "lipa40b",
"lipa50a", "lipa50b", "lipa60a", "lipa60b", "lipa70a", "lipa70b",
"lipa80a", "lipa90a", "lipa90b",
"nug12", "nug14", "nug16a", "nug16b", "nug17", "nug18", "nug20",
"nug21", "nug22", "nug24", "nug25", "nug27", "nug28", "nug30",
"rou12", "rou15", "rou20",
"scr12", "scr15", "scr20",
"sko42", "sko49", "sko56", "sko64", "sko72", "sko81", "sko90",
"sko100a", "sko100b", "sko100c", "sko100d", "sko100e", "sko100f",
"ste36b", "ste36c",
"tai12a", "tai12b", "tai15a", "tai15b", "tai17a", "tai20a",
"tai20b", "tai25a", "tai25b", "tai30a", "tai30b", "tai35a",
"tai40a", "tai40b", "tai50a", "tai50b", "tai60a", "tai60b",
"tai64c", "tai80a", "tai100a", "tai100b", "tai150b", "tai256c",
"tho30", "tho40", "tho150", "wil50", "wil100"]
params = [methods, probs]
param_names = ['Method', 'QAP Problem']
def setup(self, method, qap_prob):
dir_path = os.path.dirname(os.path.realpath(__file__))
datafile = np.load(os.path.join(dir_path, "qapdata/qap_probs.npz"),
allow_pickle=True)
slnfile = np.load(os.path.join(dir_path, "qapdata/qap_sols.npz"),
allow_pickle=True)
self.A = datafile[qap_prob][0]
self.B = datafile[qap_prob][1]
self.opt_solution = slnfile[qap_prob]
self.method = method
def time_evaluation(self, method, qap_prob):
quadratic_assignment(self.A, self.B, self.method)
def track_score(self, method, qap_prob):
res = quadratic_assignment(self.A, self.B, self.method)
score = int(res['fun'])
percent_diff = (score - self.opt_solution) / self.opt_solution
return percent_diff
| QuadraticAssignment |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 9029,
"end": 10714
} | class ____:
member_validator = attrib(validator=is_callable())
iterable_validator = attrib(
default=None, validator=optional(is_callable())
)
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.iterable_validator is not None:
self.iterable_validator(inst, attr, value)
for member in value:
self.member_validator(inst, attr, member)
def __repr__(self):
iterable_identifier = (
""
if self.iterable_validator is None
else f" {self.iterable_validator!r}"
)
return (
f"<deep_iterable validator for{iterable_identifier}"
f" iterables of {self.member_validator!r}>"
)
def deep_iterable(member_validator, iterable_validator=None):
"""
A validator that performs deep validation of an iterable.
Args:
member_validator: Validator(s) to apply to iterable members.
iterable_validator:
Validator(s) to apply to iterable itself (optional).
Raises
TypeError: if any sub-validators fail
.. versionadded:: 19.1.0
.. versionchanged:: 25.4.0
*member_validator* and *iterable_validator* can now be a list or tuple
of validators.
"""
if isinstance(member_validator, (list, tuple)):
member_validator = and_(*member_validator)
if isinstance(iterable_validator, (list, tuple)):
iterable_validator = and_(*iterable_validator)
return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, unsafe_hash=True)
| _DeepIterable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-directory/source_google_directory/api.py | {
"start": 664,
"end": 2855
} | class ____:
def __init__(self, credentials: Mapping[str, Any]):
self._creds = None
self._raw_credentials = credentials
self._service = None
@staticmethod
def _load_account_info(credentials_json: str) -> Dict:
account_info = json.loads(credentials_json)
return account_info
def _obtain_service_account_creds(self) -> service_account.Credentials:
"""Obtaining creds based on Service account scenario"""
credentials_json = self._raw_credentials.get("credentials_json")
admin_email = self._raw_credentials.get("email")
account_info = self._load_account_info(credentials_json)
creds = service_account.Credentials.from_service_account_info(account_info, scopes=SCOPES)
self._creds = creds.with_subject(admin_email)
def _obtain_web_app_creds(self) -> Credentials:
"""Obtaining creds based on Web server application scenario"""
info = {
"client_id": self._raw_credentials.get("client_id"),
"client_secret": self._raw_credentials.get("client_secret"),
"refresh_token": self._raw_credentials.get("refresh_token"),
}
creds = Credentials.from_authorized_user_info(info)
if creds.expired:
creds.refresh(Request())
self._creds = creds
def _obtain_creds(self):
if "credentials_json" in self._raw_credentials:
self._obtain_service_account_creds()
elif "client_id" and "client_secret" in self._raw_credentials:
self._obtain_web_app_creds()
def _construct_resource(self):
if not self._creds:
self._obtain_creds()
if not self._service:
self._service = build("admin", "directory_v1", credentials=self._creds)
def _get_resource(self, name: str):
self._construct_resource()
return getattr(self._service, name)
@backoff.on_exception(backoff.expo, GoogleApiHttpError, max_tries=7, giveup=rate_limit_handling)
def get(self, name: str, params: Dict = None) -> Dict:
resource = self._get_resource(name)
response = resource().list(**params).execute()
return response
| API |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 46897,
"end": 48982
} | class ____(parser_test_base.ParserTestBase):
# These tests assume that IfTest has already covered the inner workings of
# peer's functions. Instead, they focus on verifying that if statements
# under a class allow things that normally appear in a class (constants,
# functions), and disallow statements that aren't allowed in a class (import,
# etc).
def test_conditional_constant(self):
self.check(
"""
class Foo:
if sys.version_info == (3, 4, 0):
x = ... # type: int
elif sys.version_info >= (3, 5, 0):
y = ... # type: str
else:
z = ... # type: float
""",
"""
class Foo:
y: str
""",
)
def test_conditional_method(self):
self.check(
"""
class Foo:
if sys.version_info == (3, 4, 0):
def a(self, x: int) -> str: ...
elif sys.version_info >= (3, 5, 0):
def b(self, x: int) -> str: ...
else:
def c(self, x: int) -> str: ...
""",
"""
class Foo:
def b(self, x: int) -> str: ...
""",
)
def test_nested(self):
self.check(
"""
class Foo:
if sys.version_info > (3, 4, 0):
if sys.version_info >= (3, 5, 0):
def b(self, x: int) -> str: ...
""",
"""
class Foo:
def b(self, x: int) -> str: ...
""",
)
def test_no_import(self):
self.check_error(
"""
class Foo:
if sys.version_info > (3, 4, 0):
import foo
""",
3,
"Import statements need to be at module level",
)
def test_no_class(self):
self.check(
"""
class Foo:
if sys.version_info <= (3, 4, 0):
class Bar: ...
""",
"""
class Foo: ...
""",
)
def test_no_typevar(self):
self.check_error(
"""
class Foo:
if sys.version_info > (3, 4, 0):
T = TypeVar('T')
""",
3,
r"TypeVars need to be defined at module level",
)
| ClassIfTest |
python | python-visualization__folium | folium/plugins/heat_map.py | {
"start": 281,
"end": 3666
} | class ____(JSCSSMixin, Layer):
"""
Create a Heatmap layer
Parameters
----------
data : list of points of the form [lat, lng] or [lat, lng, weight]
The points you want to plot.
You can also provide a numpy.array of shape (n,2) or (n,3).
name : string, default None
The name of the Layer, as it will appear in LayerControls.
min_opacity : default 1.
The minimum opacity the heat will start at.
max_zoom : default 18
Zoom level where the points reach maximum intensity (as intensity
scales with zoom), equals maxZoom of the map by default
radius : int, default 25
Radius of each "point" of the heatmap
blur : int, default 15
Amount of blur
gradient : dict, default None
Color gradient config. Defaults to
{.4: "blue", .6: "cyan", .7: "lime", .8: "yellow", 1: "red"}
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.heatLayer(
{{ this.data|tojson }},
{{ this.options|tojavascript }}
);
{% endmacro %}
"""
)
default_js = [
(
"leaflet-heat.js",
"https://cdn.jsdelivr.net/gh/python-visualization/folium@main/folium/templates/leaflet_heat.min.js",
),
]
def __init__(
self,
data,
name=None,
min_opacity=0.5,
max_zoom=18,
radius=25,
blur=15,
gradient=None,
overlay=True,
control=True,
show=True,
**kwargs
):
super().__init__(name=name, overlay=overlay, control=control, show=show)
self._name = "HeatMap"
data = if_pandas_df_convert_to_numpy(data)
self.data = [
[*validate_location(line[:2]), *line[2:]] for line in data # noqa: E999
]
if np.any(np.isnan(self.data)):
raise ValueError("data may not contain NaNs.")
if kwargs.pop("max_val", None):
warnings.warn(
"The `max_val` parameter is no longer necessary. "
"The largest intensity is calculated automatically.",
stacklevel=2,
)
self.options = remove_empty(
min_opacity=min_opacity,
max_zoom=max_zoom,
radius=radius,
blur=blur,
gradient=gradient,
**kwargs
)
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = [[None, None], [None, None]]
for point in self.data:
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds
| HeatMap |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 658338,
"end": 658734
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ExternalIdentity", graphql_name="node")
"""The item at the end of the edge."""
| ExternalIdentityEdge |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 30398,
"end": 31353
} | class ____(OperandListAssetSelection):
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
return reduce(
operator.and_,
(
selection.resolve_inner(asset_graph, allow_missing=allow_missing)
for selection in self.operands
),
)
def resolve_checks_inner( # pyright: ignore[reportIncompatibleMethodOverride]
self, asset_graph: AssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
return reduce(
operator.and_,
(
selection.resolve_checks_inner(asset_graph, allow_missing=allow_missing)
for selection in self.operands
),
)
def to_selection_str(self) -> str:
return " and ".join(f"{operand.operand_to_selection_str()}" for operand in self.operands)
@whitelist_for_serdes
| AndAssetSelection |
python | django__django | django/db/migrations/graph.py | {
"start": 176,
"end": 967
} | class ____:
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return "<%s: (%r, %r)>" % (self.__class__.__name__, self.key[0], self.key[1])
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
| Node |
python | crytic__slither | slither/core/expressions/binary_operation.py | {
"start": 5082,
"end": 6047
} | class ____(Expression):
def __init__(
self,
left_expression: Expression,
right_expression: Expression,
expression_type: BinaryOperationType,
) -> None:
assert isinstance(left_expression, Expression)
assert isinstance(right_expression, Expression)
super().__init__()
self._expressions = [left_expression, right_expression]
self._type: BinaryOperationType = expression_type
@property
def expressions(self) -> List[Expression]:
return self._expressions
@property
def expression_left(self) -> Expression:
return self._expressions[0]
@property
def expression_right(self) -> Expression:
return self._expressions[1]
@property
def type(self) -> BinaryOperationType:
return self._type
def __str__(self) -> str:
return str(self.expression_left) + " " + str(self.type) + " " + str(self.expression_right)
| BinaryOperation |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/cache_key.py | {
"start": 1277,
"end": 1454
} | class ____(Protocol):
def __call__(
s, self: HasCacheKey, visitor: _CacheKeyTraversal
) -> _CacheKeyTraversalDispatchTypeReturn: ...
| _CacheKeyTraversalDispatchType |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 130176,
"end": 131777
} | class ____:
donate_argnums: tuple[int, ...] | None = None
donate_argnames: tuple[str, ...] | None = None
device: xc.Device | None = None
backend: str | None = None
in_shardings_treedef: PyTreeDef | None = None
in_shardings_leaves: tuple[Any, ...] | None = None
out_shardings_treedef: PyTreeDef | None = None
out_shardings_leaves: tuple[Any, ...] | None = None
in_layouts_treedef: PyTreeDef | None = None
in_layouts_leaves: tuple[Any, ...] | None = None
out_layouts_treedef: PyTreeDef | None = None
out_layouts_leaves: tuple[Any, ...] | None = None
compiler_options_kvs: tuple[tuple[str, Any], ...] | None = None
@functools.cached_property
def contains_explicit_attributes(self):
return (self.donate_argnums is not None or
self.donate_argnames is not None or
self.device is not None or
self.backend is not None or
any(not isinstance(i, UnspecifiedValue) for i in self.in_shardings_leaves) or
any(not isinstance(o, UnspecifiedValue) for o in self.out_shardings_leaves) or
any(i is not None for i in self.in_layouts_leaves) or
any(o is not None for o in self.out_layouts_leaves) or
self.compiler_options_kvs)
def reflatten_outputs_for_dispatch(out_tree, out_flat):
# We arrive at dispatch having flattened according to the default
# pytree registry, but we want to re-flatten according to our
# dispatch-specific registry.
out_unflat = tree_util.tree_unflatten(out_tree, out_flat)
return tree_util.dispatch_registry.flatten(out_unflat, None)
| JitGlobalCppCacheKeys |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 3583,
"end": 3808
} | class ____(BaseModel):
enabled: bool = True
model_config = {
"extra": "allow",
"json_schema_extra": {
"$ref": create_definition_ref("io.k8s.api.core.v1.Probe"),
},
}
| StartupProbe |
python | boto__boto3 | tests/unit/resources/test_params.py | {
"start": 869,
"end": 6655
} | class ____(BaseTestCase):
def test_service_action_params_identifier(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'WarehouseUrl',
'source': 'identifier',
'name': 'Url',
}
],
}
)
parent = mock.Mock()
parent.url = 'w-url'
params = create_request_parameters(parent, request_model)
assert params['WarehouseUrl'] == 'w-url'
def test_service_action_params_data_member(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'WarehouseUrl',
'source': 'data',
'path': 'SomeMember',
}
],
}
)
parent = mock.Mock()
parent.meta = ResourceMeta('test', data={'SomeMember': 'w-url'})
params = create_request_parameters(parent, request_model)
assert params['WarehouseUrl'] == 'w-url'
def test_service_action_params_data_member_missing(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'WarehouseUrl',
'source': 'data',
'path': 'SomeMember',
}
],
}
)
parent = mock.Mock()
def load_data():
parent.meta.data = {'SomeMember': 'w-url'}
parent.load.side_effect = load_data
parent.meta = ResourceMeta('test')
params = create_request_parameters(parent, request_model)
parent.load.assert_called_with()
assert params['WarehouseUrl'] == 'w-url'
def test_service_action_params_data_member_missing_no_load(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'WarehouseUrl',
'source': 'data',
'path': 'SomeMember',
}
],
}
)
# This mock has no ``load`` method.
parent = mock.Mock(spec=ServiceResource)
parent.meta = ResourceMeta('test', data=None)
with pytest.raises(ResourceLoadException):
create_request_parameters(parent, request_model)
def test_service_action_params_constants(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'Param1',
'source': 'string',
'value': 'param1',
},
{'target': 'Param2', 'source': 'integer', 'value': 123},
{'target': 'Param3', 'source': 'boolean', 'value': True},
],
}
)
params = create_request_parameters(None, request_model)
assert params['Param1'] == 'param1'
assert params['Param2'] == 123
assert params['Param3'] is True
def test_service_action_params_input(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [{'target': 'Param1', 'source': 'input'}],
}
)
params = create_request_parameters(None, request_model)
assert params == {}
params['param1'] = 'myinput'
params = create_request_parameters(None, request_model, params=params)
assert params == {'param1': 'myinput'}
def test_service_action_params_invalid(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [{'target': 'Param1', 'source': 'invalid'}],
}
)
with pytest.raises(NotImplementedError):
create_request_parameters(None, request_model)
def test_service_action_params_list(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'WarehouseUrls[0]',
'source': 'string',
'value': 'w-url',
}
],
}
)
params = create_request_parameters(None, request_model)
assert isinstance(params['WarehouseUrls'], list)
assert len(params['WarehouseUrls']) == 1
assert 'w-url' in params['WarehouseUrls']
def test_service_action_params_reuse(self):
request_model = Request(
{
'operation': 'GetFrobs',
'params': [
{
'target': 'Delete.Objects[].Key',
'source': 'data',
'path': 'Key',
}
],
}
)
item1 = mock.Mock()
item1.meta = ResourceMeta('test', data={'Key': 'item1'})
item2 = mock.Mock()
item2.meta = ResourceMeta('test', data={'Key': 'item2'})
# Here we create params and then re-use it to build up a more
# complex structure over multiple calls.
params = create_request_parameters(item1, request_model)
create_request_parameters(item2, request_model, params=params)
assert params == {
'Delete': {'Objects': [{'Key': 'item1'}, {'Key': 'item2'}]}
}
| TestServiceActionParams |
python | doocs__leetcode | solution/3200-3299/3277.Maximum XOR Score Subarray Queries/Solution.py | {
"start": 0,
"end": 501
} | class ____:
def maximumSubarrayXor(
self, nums: List[int], queries: List[List[int]]
) -> List[int]:
n = len(nums)
f = [[0] * n for _ in range(n)]
g = [[0] * n for _ in range(n)]
for i in range(n - 1, -1, -1):
f[i][i] = g[i][i] = nums[i]
for j in range(i + 1, n):
f[i][j] = f[i][j - 1] ^ f[i + 1][j]
g[i][j] = max(f[i][j], g[i][j - 1], g[i + 1][j])
return [g[l][r] for l, r in queries]
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 68065,
"end": 70072
} | class ____(Response):
"""
Response of tasks.add_or_update_artifacts endpoint.
:param added: Keys of artifacts added
:type added: Sequence[str]
:param updated: Keys of artifacts updated
:type updated: Sequence[str]
"""
_service = "tasks"
_action = "add_or_update_artifacts"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"added": {
"description": "Keys of artifacts added",
"items": {"type": "string"},
"type": ["array", "null"],
},
"updated": {
"description": "Keys of artifacts updated",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, added: Optional[List[str]] = None, updated: Optional[List[str]] = None, **kwargs: Any) -> None:
super(AddOrUpdateArtifactsResponse, self).__init__(**kwargs)
self.added = added
self.updated = updated
@schema_property("added")
def added(self) -> Optional[List[str]]:
return self._property_added
@added.setter
def added(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_added = None
return
self.assert_isinstance(value, "added", (list, tuple))
self.assert_isinstance(value, "added", six.string_types, is_array=True)
self._property_added = value
@schema_property("updated")
def updated(self) -> Optional[List[str]]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_updated = None
return
self.assert_isinstance(value, "updated", (list, tuple))
self.assert_isinstance(value, "updated", six.string_types, is_array=True)
self._property_updated = value
| AddOrUpdateArtifactsResponse |
python | pypa__setuptools | setuptools/tests/test_dist_info.py | {
"start": 2366,
"end": 5005
} | class ____:
"""Make sure the .dist-info directory produced with the ``dist_info`` command
is the same as the one produced by ``bdist_wheel``.
"""
SETUPCFG = DALS(
"""
[metadata]
name = {name}
version = {version}
[options]
install_requires =
foo>=12; sys_platform != "linux"
[options.extras_require]
test = pytest
[options.entry_points]
console_scripts =
executable-name = my_package.module:function
discover =
myproj = my_package.other_module:function
"""
)
EGG_INFO_OPTS = [
# Related: #3088 #2872
("", ""),
(".post", "[egg_info]\ntag_build = post\n"),
(".post", "[egg_info]\ntag_build = .post\n"),
(".post", "[egg_info]\ntag_build = post\ntag_date = 1\n"),
(".dev", "[egg_info]\ntag_build = .dev\n"),
(".dev", "[egg_info]\ntag_build = .dev\ntag_date = 1\n"),
("a1", "[egg_info]\ntag_build = .a1\n"),
("+local", "[egg_info]\ntag_build = +local\n"),
]
@pytest.mark.parametrize("name", "my-proj my_proj my.proj My.Proj".split())
@pytest.mark.parametrize("version", ["0.42.13"])
@pytest.mark.parametrize(("suffix", "cfg"), EGG_INFO_OPTS)
def test_dist_info_is_the_same_as_in_wheel(
self, name, version, tmp_path, suffix, cfg
):
config = self.SETUPCFG.format(name=name, version=version) + cfg
for i in "dir_wheel", "dir_dist":
(tmp_path / i).mkdir()
(tmp_path / i / "setup.cfg").write_text(config, encoding="utf-8")
run_command("bdist_wheel", cwd=tmp_path / "dir_wheel")
wheel = next(tmp_path.glob("dir_wheel/dist/*.whl"))
unpack_archive(wheel, tmp_path / "unpack")
wheel_dist_info = next(tmp_path.glob("unpack/*.dist-info"))
run_command("dist_info", cwd=tmp_path / "dir_dist")
dist_info = next(tmp_path.glob("dir_dist/*.dist-info"))
assert dist_info.name == wheel_dist_info.name
assert dist_info.name.startswith(f"my_proj-{version}{suffix}")
for file in "METADATA", "entry_points.txt":
assert read(dist_info / file) == read(wheel_dist_info / file)
def run_command_inner(*cmd, **kwargs):
opts = {
"stderr": subprocess.STDOUT,
"stdout": subprocess.PIPE,
"text": True,
"encoding": "utf-8",
"check": True,
**kwargs,
}
cmd = [sys.executable, "-c", "__import__('setuptools').setup()", *map(str, cmd)]
return subprocess.run(cmd, **opts)
def run_command(*args, **kwargs):
return run_command_inner(*args, **kwargs).stdout
| TestWheelCompatibility |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 1068,
"end": 2171
} | class ____:
"""This class is a drop-in replacement for gRPC default serializer.
It replaces default serializer to make sure the message sending interface is convenient.
It can handle sending single messages or a list of messages. It also returns a list of messages.
Effectively this is hiding MessageListProto from the consumer
"""
@staticmethod
def SerializeToString(x: 'Union[List[DataRequest], DataRequest]'):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
protos = []
if not isinstance(x, Iterable):
protos.append(x.proto_with_data)
else:
protos = [r.proto_with_data for r in x]
return jina_pb2.DataRequestListProto(requests=protos).SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
rlp = jina_pb2.DataRequestListProto()
rlp.ParseFromString(x)
return [DataRequest.from_proto(request) for request in rlp.requests]
| DataRequestListProto |
python | pypa__pipenv | pipenv/patched/pip/_internal/cache.py | {
"start": 1032,
"end": 3825
} | class ____:
"""An abstract class - provides cache directories for data from links
:param cache_dir: The root of the cache.
"""
def __init__(self, cache_dir: str) -> None:
super().__init__()
assert not cache_dir or os.path.isabs(cache_dir)
self.cache_dir = cache_dir or None
def _get_cache_path_parts(self, link: Link) -> List[str]:
"""Get parts of part that must be os.path.joined with cache_dir"""
# We want to generate an url to use as our cache key, we don't want to
# just reuse the URL because it might have other items in the fragment
# and we don't care about those.
key_parts = {"url": link.url_without_fragment}
if link.hash_name is not None and link.hash is not None:
key_parts[link.hash_name] = link.hash
if link.subdirectory_fragment:
key_parts["subdirectory"] = link.subdirectory_fragment
# Include interpreter name, major and minor version in cache key
# to cope with ill-behaved sdists that build a different wheel
# depending on the python version their setup.py is being run on,
# and don't encode the difference in compatibility tags.
# https://github.com/pypa/pip/issues/7296
key_parts["interpreter_name"] = interpreter_name()
key_parts["interpreter_version"] = interpreter_version()
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and
# thus less secure). However the differences don't make a lot of
# difference for our use case here.
hashed = _hash_dict(key_parts)
# We want to nest the directories some to prevent having a ton of top
# level directories where we might run out of sub directories on some
# FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
return parts
def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]:
can_not_cache = not self.cache_dir or not canonical_package_name or not link
if can_not_cache:
return []
path = self.get_path_for_link(link)
if os.path.isdir(path):
return [(candidate, path) for candidate in os.listdir(path)]
return []
def get_path_for_link(self, link: Link) -> str:
"""Return a directory to store cached items in for link."""
raise NotImplementedError()
def get(
self,
link: Link,
package_name: Optional[str],
supported_tags: List[Tag],
) -> Link:
"""Returns a link to a cached item if it exists, otherwise returns the
passed link.
"""
raise NotImplementedError()
| Cache |
python | huggingface__transformers | src/transformers/pipelines/zero_shot_classification.py | {
"start": 1570,
"end": 12307
} | class ____(ChunkPipeline):
"""
NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural
language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a
hardcoded number of potential classes, they can be chosen at runtime. It usually means it's slower but it is
**much** more flexible.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate
label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model
config's :attr:*~transformers.PreTrainedConfig.label2id*.
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="facebook/bart-large-mnli")
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["english", "german"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-classification"`.
The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
of available models on [huggingface.co/models](https://huggingface.co/models?search=nli).
"""
_load_processor = False
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), **kwargs):
self._args_parser = args_parser
super().__init__(**kwargs)
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
)
@property
def entailment_id(self):
for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail"):
return ind
return -1
def _parse_and_tokenize(
self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
):
"""
Parse arguments and tokenize only_first so that hypothesis (label) is not truncated
"""
return_tensors = "pt"
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`"
)
self.tokenizer.pad_token = self.tokenizer.eos_token
try:
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=truncation,
)
except Exception as e:
if "too short" in str(e):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=TruncationStrategy.DO_NOT_TRUNCATE,
)
else:
raise e
return inputs
def _sanitize_parameters(self, **kwargs):
if kwargs.get("multi_class") is not None:
kwargs["multi_label"] = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers."
)
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
postprocess_params = {}
if "multi_label" in kwargs:
postprocess_params["multi_label"] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__(
self,
sequences: str | list[str],
*args,
**kwargs,
):
"""
Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more
information.
Args:
sequences (`str` or `list[str]`):
The sequence(s) to classify, will be truncated if the model input is too large.
candidate_labels (`str` or `list[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
similar syntax for the candidate label to be inserted into the template. For example, the default
template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the
model like `"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template
works well in many cases, but it may be worthwhile to experiment with different templates depending on
the task setting.
multi_label (`bool`, *optional*, defaults to `False`):
Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that
the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered
independent and probabilities are normalized for each candidate by doing a softmax of the entailment
score vs. the contradiction score.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **sequence** (`str`) -- The sequence for which this is the output.
- **labels** (`list[str]`) -- The labels sorted by order of likelihood.
- **scores** (`list[float]`) -- The probabilities for each of the labels.
"""
if len(args) == 0:
pass
elif len(args) == 1 and "candidate_labels" not in kwargs:
kwargs["candidate_labels"] = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}")
return super().__call__(sequences, **kwargs)
def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
model_input = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(candidate_labels) - 1,
**model_input,
}
def _forward(self, inputs):
candidate_label = inputs["candidate_label"]
sequence = inputs["sequence"]
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
# `XXXForSequenceClassification` models should not use `use_cache=True` even if it's supported
model_forward = self.model.forward
if "use_cache" in inspect.signature(model_forward).parameters:
model_inputs["use_cache"] = False
outputs = self.model(**model_inputs)
model_outputs = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def postprocess(self, model_outputs, multi_label=False):
candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
sequences = [outputs["sequence"] for outputs in model_outputs]
logits = np.concatenate([output["logits"].float().numpy() for output in model_outputs])
N = logits.shape[0]
n = len(candidate_labels)
num_sequences = N // n
reshaped_outputs = logits.reshape((num_sequences, n, -1))
if multi_label or len(candidate_labels) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
entailment_id = self.entailment_id
contradiction_id = -1 if entailment_id == 0 else 0
entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., self.entailment_id]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
top_inds = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| ZeroShotClassificationPipeline |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_workers.py | {
"start": 2595,
"end": 15315
} | class ____:
async def test_create_work_pool(self, session, client):
response = await client.post(
"/work_pools/",
json=dict(
name="Pool 1",
type="test",
),
)
assert response.status_code == status.HTTP_201_CREATED, response.text
result = parse_obj_as(WorkPool, response.json())
assert result.name == "Pool 1"
assert result.is_paused is False
assert result.concurrency_limit is None
assert result.base_job_template == {}
model = await models.workers.read_work_pool(
session=session, work_pool_id=result.id
)
assert model
assert model.name == "Pool 1"
assert model.storage_configuration == WorkPoolStorageConfiguration()
assert_status_events("Pool 1", ["prefect.work-pool.not-ready"])
async def test_create_work_pool_with_storage_configuration(self, client):
bundle_upload_step = {
"prefect_aws.experimental.bundles.upload": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
bundle_execution_step = {
"prefect_aws.experimental.bundles.execute": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
default_result_storage_block_id = uuid.uuid4()
data = schemas.actions.WorkPoolCreate(
name="olympic",
type="kubernetes",
storage_configuration=schemas.core.WorkPoolStorageConfiguration(
bundle_upload_step=bundle_upload_step,
bundle_execution_step=bundle_execution_step,
default_result_storage_block_id=default_result_storage_block_id,
),
).model_dump(mode="json")
response = await client.post(
"/work_pools/",
json=data,
)
assert response.status_code == 201
assert response.json()["storage_configuration"] == {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(default_result_storage_block_id),
}
async def test_create_work_pool_with_invalid_storage_configuration_key(
self,
client,
):
response = await client.post(
"/work_pools/",
json={"storage_configuration": {"invalid_key": "invalid_value"}},
)
assert response.status_code == 422
async def test_create_work_pool_with_options(self, client):
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", type="test", is_paused=True, concurrency_limit=5),
)
assert response.status_code == status.HTTP_201_CREATED, response.text
result = parse_obj_as(WorkPool, response.json())
assert result.name == "Pool 1"
assert result.is_paused is True
assert result.concurrency_limit == 5
async def test_create_work_pool_with_template(self, client):
base_job_template = {
"job_configuration": {
"command": "{{ command }}",
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
}
},
"required": [],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", type="test", base_job_template=base_job_template),
)
assert response.status_code == status.HTTP_201_CREATED, response.text
result = parse_obj_as(WorkPool, response.json())
assert result.base_job_template == base_job_template
async def test_create_duplicate_work_pool(self, client, work_pool):
response = await client.post(
"/work_pools/",
json=dict(name=work_pool.name, type="PROCESS"),
)
assert response.status_code == status.HTTP_409_CONFLICT, response.text
@pytest.mark.parametrize("name", ["", "hi/there", "hi%there"])
async def test_create_work_pool_with_invalid_name(self, client, name):
response = await client.post("/work_pools/", json=dict(name=name))
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
@pytest.mark.parametrize("name", ["''", " ", "' ' "])
async def test_create_work_pool_with_emptyish_name(self, client, name):
response = await client.post("/work_pools/", json=dict(name=name))
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert "name cannot be an empty string" in response.content.decode()
@pytest.mark.parametrize("type", ["PROCESS", "K8S", "AGENT"])
async def test_create_typed_work_pool(self, session, client, type):
response = await client.post(
"/work_pools/", json=dict(name="Pool 1", type=type)
)
assert response.status_code == status.HTTP_201_CREATED, response.text
result = parse_obj_as(WorkPool, response.json())
assert result.type == type
@pytest.mark.parametrize("name", RESERVED_POOL_NAMES)
async def test_create_reserved_pool_fails(self, session, client, name):
response = await client.post("/work_pools/", json=dict(name=name))
assert response.status_code == status.HTTP_403_FORBIDDEN, response.text
assert "reserved for internal use" in response.json()["detail"]
async def test_create_work_pool_template_validation_missing_keys(self, client):
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template={"foo": "bar", "x": ["y"]}),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The `base_job_template` must contain both a `job_configuration` key and a"
" `variables` key." in response.json()["exception_detail"][0]["msg"]
)
async def test_create_work_pool_template_validation_missing_variables(self, client):
missing_variable_template = {
"job_configuration": {
"command": "{{ other_variable }}",
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
},
},
"required": [],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template=missing_variable_template),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The variables specified in the job configuration template must be "
"present as properties in the variables schema. "
"Your job configuration uses the following undeclared "
"variable(s): other_variable."
in response.json()["exception_detail"][0]["msg"]
)
async def test_create_work_pool_template_validation_missing_nested_variables(
self, client
):
missing_variable_template = {
"job_configuration": {
"config": {
"command": "{{ missing_variable }}",
}
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
},
},
"required": [],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template=missing_variable_template),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The variables specified in the job configuration template must be "
"present as properties in the variables schema. "
"Your job configuration uses the following undeclared "
"variable(s): missing_variable."
in response.json()["exception_detail"][0]["msg"]
)
async def test_create_work_pool_template_validation_missing_block_document(
self,
client,
):
missing_block_doc_ref_template = {
"job_configuration": {
"block": "{{ block_string }}",
},
"variables": {
"properties": {
"block_string": {
"type": "string",
"title": "Block String",
"default": {"$ref": {"block_document_id": "non-existing"}},
},
},
"required": ["block_string"],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template=missing_block_doc_ref_template),
)
assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
assert "Block not found" in response.json()["detail"]
async def test_create_work_pool_template_validation_rejects_block_document_reference_incorrect_type(
self,
client,
block_document,
):
missing_block_doc_ref_template = {
"job_configuration": {
"block": "{{ block_string }}",
},
"variables": {
"properties": {
"block_string": {
"type": "string",
"title": "Block String",
"default": {
"$ref": {"block_document_id": str(block_document.id)}
},
},
},
"required": ["block_string"],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template=missing_block_doc_ref_template),
)
assert (
"Failure reason: {'foo': 'bar'} is not of type 'string'"
in response.json()["detail"]
)
assert response.status_code == 422, response.text
async def test_create_work_pool_template_validation_accepts_valid_block_document_reference(
self,
client,
block_document,
):
missing_block_doc_ref_template = {
"job_configuration": {
"block": "{{ block_object }}",
},
"variables": {
"properties": {
"block_object": {
"type": "object",
"title": "Block Object",
"default": {
"$ref": {"block_document_id": str(block_document.id)}
},
},
},
"required": ["block_object"],
},
}
response = await client.post(
"/work_pools/",
json=dict(name="Pool 1", base_job_template=missing_block_doc_ref_template),
)
assert response.status_code == 201, response.text
async def test_create_work_pool_with_3_3_7_client_version_does_not_include_default_result_storage_block_id(
self,
client: AsyncClient,
):
response = await client.post(
"/work_pools/",
headers={"User-Agent": "prefect/3.3.7 (API 0.8.4)"},
json=schemas.actions.WorkPoolCreate(
name="test",
type="kubernetes",
).model_dump(mode="json"),
)
assert response.status_code == 201
assert response.json()["storage_configuration"] == {
"bundle_upload_step": None,
"bundle_execution_step": None,
}
| TestCreateWorkPool |
python | kamyu104__LeetCode-Solutions | Python/largest-divisible-subset.py | {
"start": 31,
"end": 787
} | class ____(object):
def largestDivisibleSubset(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if not nums:
return []
nums.sort()
dp = [1] * len(nums)
prev = [-1] * len(nums)
largest_idx = 0
for i in xrange(len(nums)):
for j in xrange(i):
if nums[i] % nums[j] == 0:
if dp[i] < dp[j] + 1:
dp[i] = dp[j] + 1
prev[i] = j
if dp[largest_idx] < dp[i]:
largest_idx = i
result = []
i = largest_idx
while i != -1:
result.append(nums[i])
i = prev[i]
return result[::-1]
| Solution |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ransac.py | {
"start": 1953,
"end": 25782
} | class ____(
MetaEstimatorMixin,
RegressorMixin,
MultiOutputMixin,
BaseEstimator,
):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
estimator : object, default=None
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
* `predict(X)`: Returns predicted values using the linear model,
which is used to compute residual error using loss function.
If `estimator` is None, then
:class:`~sklearn.linear_model.LinearRegression` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), default=None
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `estimator`. By default a
:class:`~sklearn.linear_model.LinearRegression` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``. This parameter is highly
dependent upon the model, so if a `estimator` other than
:class:`~sklearn.linear_model.LinearRegression` is used, the user must
provide a value.
residual_threshold : float, default=None
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`. Points whose residuals are
strictly equal to the threshold are considered as inliers.
is_data_valid : callable, default=None
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, default=None
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, default=100
Maximum number of iterations for random sample selection.
max_skips : int, default=np.inf
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, default=np.inf
Stop iteration if at least this number of inliers are found.
stop_score : float, default=np.inf
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], default=0.99
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
loss : str, callable, default='absolute_error'
String inputs, 'absolute_error' and 'squared_error' are supported which
find the absolute error and squared error per sample respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the i-th value of the array corresponding to the loss
on ``X[i]``.
If the loss on a sample is greater than the ``residual_threshold``,
then this sample is classified as an outlier.
.. versionadded:: 0.18
random_state : int, RandomState instance, default=None
The generator used to initialize the centers.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : object
Final model fitted on the inliers predicted by the "best" model found
during RANSAC sampling (copy of the `estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
HuberRegressor : Linear regression model that is robust to outliers.
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
SGDRegressor : Fitted by minimizing a regularized empirical loss with SGD.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] https://www.sri.com/wp-content/uploads/2021/12/ransac-publication.pdf
.. [3] https://bmva-archive.org.uk/bmvc/2009/Papers/Paper355/Paper355.pdf
Examples
--------
>>> from sklearn.linear_model import RANSACRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = RANSACRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9885
>>> reg.predict(X[:1,])
array([-31.9417])
For a more detailed example, see
:ref:`sphx_glr_auto_examples_linear_model_plot_ransac.py`
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit", "score", "predict"]), None],
"min_samples": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
None,
],
"residual_threshold": [Interval(Real, 0, None, closed="left"), None],
"is_data_valid": [callable, None],
"is_model_valid": [callable, None],
"max_trials": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"max_skips": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"stop_n_inliers": [
Interval(Integral, 0, None, closed="left"),
Options(Real, {np.inf}),
],
"stop_score": [Interval(Real, None, None, closed="both")],
"stop_probability": [Interval(Real, 0, 1, closed="both")],
"loss": [StrOptions({"absolute_error", "squared_error"}), callable],
"random_state": ["random_state"],
}
def __init__(
self,
estimator=None,
*,
min_samples=None,
residual_threshold=None,
is_data_valid=None,
is_model_valid=None,
max_trials=100,
max_skips=np.inf,
stop_n_inliers=np.inf,
stop_score=np.inf,
stop_probability=0.99,
loss="absolute_error",
random_state=None,
):
self.estimator = estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.max_skips = max_skips
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.random_state = random_state
self.loss = loss
@_fit_context(
# RansacRegressor.estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample
raises error if sample_weight is passed and estimator
fit method does not support it.
.. versionadded:: 0.18
**fit_params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Fitted `RANSACRegressor` estimator.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
# Need to validate separately here. We can't pass multi_output=True
# because that would allow y to be csr. Delay expensive finiteness
# check to the estimator's own input validation.
_raise_for_params(fit_params, self, "fit")
check_X_params = dict(accept_sparse="csr", ensure_all_finite=False)
check_y_params = dict(ensure_2d=False)
X, y = validate_data(
self, X, y, validate_separately=(check_X_params, check_y_params)
)
check_consistent_length(X, y)
if self.estimator is not None:
estimator = clone(self.estimator)
else:
estimator = LinearRegression()
if self.min_samples is None:
if not isinstance(estimator, LinearRegression):
raise ValueError(
"`min_samples` needs to be explicitly set when estimator "
"is not a LinearRegression."
)
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
min_samples = self.min_samples
if min_samples > X.shape[0]:
raise ValueError(
"`min_samples` may not be larger than number "
"of samples: n_samples = %d." % (X.shape[0])
)
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.loss == "absolute_error":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda y_true, y_pred: np.sum(
np.abs(y_true - y_pred), axis=1
)
elif self.loss == "squared_error":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda y_true, y_pred: np.sum(
(y_true - y_pred) ** 2, axis=1
)
elif callable(self.loss):
loss_function = self.loss
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight")
estimator_name = type(estimator).__name__
if sample_weight is not None and not estimator_fit_has_sample_weight:
raise ValueError(
"%s does not support sample_weight. Sample"
" weights are only used for the calibration"
" itself." % estimator_name
)
if sample_weight is not None:
fit_params["sample_weight"] = sample_weight
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit={}, predict={}, score={})
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
routed_params.estimator.fit = {"sample_weight": sample_weight}
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
inlier_best_idxs_subset = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(
n_samples, min_samples, random_state=random_state
)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if self.is_data_valid is not None and not self.is_data_valid(
X_subset, y_subset
):
self.n_skips_invalid_data_ += 1
continue
# cut `fit_params` down to `subset_idxs`
fit_params_subset = _check_method_params(
X, params=routed_params.estimator.fit, indices=subset_idxs
)
# fit model for current random sample set
estimator.fit(X_subset, y_subset, **fit_params_subset)
# check if estimated model is valid
if self.is_model_valid is not None and not self.is_model_valid(
estimator, X_subset, y_subset
):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = estimator.predict(X)
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset <= residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# cut `fit_params` down to `inlier_idxs_subset`
score_params_inlier_subset = _check_method_params(
X, params=routed_params.estimator.score, indices=inlier_idxs_subset
)
# score of inlier data set
score_subset = estimator.score(
X_inlier_subset,
y_inlier_subset,
**score_params_inlier_subset,
)
# same number of inliers but worse score -> skip current random
# sample
if n_inliers_subset == n_inliers_best and score_subset < score_best:
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
inlier_best_idxs_subset = inlier_idxs_subset
max_trials = min(
max_trials,
_dynamic_max_trials(
n_inliers_best, n_samples, min_samples, self.stop_probability
),
)
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*)."
)
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*)."
)
else:
if (
self.n_skips_no_inliers_
+ self.n_skips_invalid_data_
+ self.n_skips_invalid_model_
) > self.max_skips:
warnings.warn(
(
"RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*)."
),
ConvergenceWarning,
)
# estimate final model using all inliers
fit_params_best_idxs_subset = _check_method_params(
X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset
)
estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset)
self.estimator_ = estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X, **params):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : {array-like or sparse matrix} of shape (n_samples, n_features)
Input data.
**params : dict
Parameters routed to the `predict` method of the sub-estimator via
the metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
ensure_all_finite=False,
accept_sparse=True,
reset=False,
)
_raise_for_params(params, self, "predict")
if _routing_enabled():
predict_params = process_routing(self, "predict", **params).estimator[
"predict"
]
else:
predict_params = {}
return self.estimator_.predict(X, **predict_params)
def score(self, X, y, **params):
"""Return the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : (array-like or sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
**params : dict
Parameters routed to the `score` method of the sub-estimator via
the metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
ensure_all_finite=False,
accept_sparse=True,
reset=False,
)
_raise_for_params(params, self, "score")
if _routing_enabled():
score_params = process_routing(self, "score", **params).estimator["score"]
else:
score_params = {}
return self.estimator_.score(X, y, **score_params)
def get_metadata_routing(self):
    """Get metadata routing of this object.

    Please check :ref:`User Guide <metadata_routing>` on how the routing
    mechanism works.

    .. versionadded:: 1.5

    Returns
    -------
    routing : MetadataRouter
        A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
        routing information.
    """
    # Build the caller -> callee method mapping separately for readability:
    # fit routes to the sub-estimator's fit and score; score and predict
    # route straight through to their namesakes.
    mapping = (
        MethodMapping()
        .add(caller="fit", callee="fit")
        .add(caller="fit", callee="score")
        .add(caller="score", callee="score")
        .add(caller="predict", callee="predict")
    )
    return MetadataRouter(owner=self).add(
        estimator=self.estimator, method_mapping=mapping
    )
def __sklearn_tags__(self):
    """Propagate the sub-estimator's sparse-input support into our tags."""
    tags = super().__sklearn_tags__()
    if self.estimator is None:
        # The implicit default estimator (LinearRegression) accepts sparse X.
        sparse_ok = True
    else:
        sparse_ok = get_tags(self.estimator).input_tags.sparse
    tags.input_tags.sparse = sparse_ok
    return tags
| RANSACRegressor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform2.py | {
"start": 623,
"end": 821
class ____(metaclass=ModelMeta):
    """Base class driven by the dataclass-transform metaclass ModelMeta.

    Subclasses may pass dataclass-style keyword options at class definition
    time; the defaults declared here (frozen=False, kw_only=True, order=True)
    are what `__init_subclass__` advertises to the type checker.
    """

    def __init_subclass__(
        cls,
        *,
        frozen: bool = False,
        kw_only: bool = True,
        order: bool = True,
    ) -> None: ...
| ModelBase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/functional_ops_test.py | {
"start": 2502,
"end": 39327
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
    """foldl over int elems, with and without an explicit initializer."""
    elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    # No initializer: the first element (1) seeds the accumulator, so the
    # fold computes ((((1+2)*2+3)*2+4)*2+5)*2+6)*2 = 208.
    r = functional_ops.foldl(
        lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
        elems)
    self.assertAllEqual(208, self.evaluate(r))
    # With initializer=10 the same fold starts from 10 and yields 880.
    r = functional_ops.foldl(
        lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
        elems,
        initializer=10)
    self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputDifferentDimsSingleOutput(self):
elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
initializer = np.array([0.0, 0.0, 0.0])
r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
(elems, other_elems), initializer)
self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldl_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
    """foldr (right fold) over int elems, with and without an initializer."""
    elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
    # Right fold: the last element (6) seeds the accumulator and elements are
    # consumed back-to-front, giving ((((6+5)*2+4)*2+3)*2+2)*2+1)*2 = 450.
    r = functional_ops.foldr(
        lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
        elems)
    self.assertAllEqual(450, self.evaluate(r))
    # With initializer=10 the right fold starts from 10 and yields 1282.
    r = functional_ops.foldr(
        lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
        elems,
        initializer=10)
    self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldr_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, self.evaluate(r))
# pylint: disable=unnecessary-lambda
@test_util.run_deprecated_v1
def testFold_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
    """scan emits the running (cumulative) product of elems.

    Without an initializer the first element seeds the accumulator; with
    initializer=v every partial product is additionally scaled by v (2.0).
    """
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = constant_op.constant(2.0, name="v")
    # pylint: disable=unnecessary-lambda
    r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
    self.assertAllEqual([1., 2., 6., 24., 120., 720.], self.evaluate(r))
    r = functional_ops.scan(
        lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
    self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems,
reverse=True)
self.assertAllEqual([720., 720., 360., 120., 30., 6.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v,
reverse=True)
self.assertAllEqual([1440., 1440., 720., 240., 60., 12.],
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
initializer)
r_value = self.evaluate(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
(elems, -elems))
r_value = self.evaluate(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
with self.assertRaisesRegex(
ValueError, "two structures don't have the same nested structure"):
functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
@test_util.run_deprecated_v1
def testScan_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
return functional_ops.foldl(
lambda b, y: b * y * x, inner_elems, initializer=a)
r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
# t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
# t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
# t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
# t == 3, a == 2.25, x == 4 (returns 9)
# t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
# t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
self.assertAllClose([1., 1., 2.25, 9.], self.evaluate(r))
@test_util.run_deprecated_v1
def testScan_Control(self):
with self.cached_session() as sess:
s = array_ops.placeholder(dtypes.float32, shape=[None])
b = array_ops.placeholder(dtypes.bool)
with ops.control_dependencies([b]):
c = functional_ops.scan(lambda a, x: x * a, s)
self.assertAllClose(
np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
b: True}))
@test_util.run_deprecated_v1
def testScan_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
# pylint: enable=unnecessary-lambda
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(873.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testScanGradientWithPartStopGradient(self):
a = variables.Variable(0.0, name="a")
b = variables.Variable(0.0, name="b")
elems = array_ops.zeros(5)
l0, l1 = functional_ops.scan(
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session():
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
    """Static shape inferred for a foldl result matches its runtime shape."""
    x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    def fn(_, current_input):
        # Accumulator is ignored; output shape is that of each row.
        return current_input
    initializer = constant_op.constant([0, 0, 0])
    y = functional_ops.foldl(fn, x, initializer=initializer)
    self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
    """Static shape inferred for a scan result matches its runtime shape."""
    x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    def fn(_, current_input):
        # Accumulator is ignored; each scan step just echoes its input row.
        return current_input
    initializer = constant_op.constant([0, 0, 0])
    y = functional_ops.scan(fn, x, initializer=initializer)
    self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
# TODO(akshayka): this test fails in eager: the iterable is of length 0 so
# so the body of the while loop never executes
@test_util.run_deprecated_v1
def testScanEmptyTensor(self):
with self.cached_session():
x = functional_ops.scan(
lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), self.evaluate(x).shape)
@test_util.run_deprecated_v1
def testScanUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
initializer = array_ops.placeholder(dtypes.float32)
def fn(_, current_input):
return current_input
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
@test_util.run_deprecated_v1
def testScanVaryingShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
result_grad = gradients_impl.gradients(result, [x])[0]
result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
feed_dict={x: [[1.0, 2.0]]})
@test_util.run_deprecated_v1
def testRemoteFunction(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:worker/replica:0/task:0/cpu:1")
with session.Session(worker[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:1")
with self.test_session(config=worker_config) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionSameDeviceDirectSession(self):
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/device:GPU:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPUStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.string)
def _remote_fn(inp):
return array_ops.identity(inp)
a = array_ops.constant("a")
with ops.device("/gpu:0"):
remote_op = functional_ops.remote_call(
args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
ret = self.evaluate(remote_op)
self.assertAllEqual(ret, [b"a"])
@test_util.run_deprecated_v1
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:worker/replica:0/task:1/cpu:0")[0] + 3.0
with session.Session(workers[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9)
@test_util.run_v2_only
def testRemoteFunctionCancellation(self):
context._reset_context()
logical_devices = []
logical_devices.append(context.LogicalDeviceConfiguration())
logical_devices.append(context.LogicalDeviceConfiguration())
framework_config.set_logical_device_configuration(
framework_config.list_physical_devices("CPU")[0], logical_devices)
@function.Defun(dtypes.float32)
def _remote_fn(v):
# We run two collectives here to make sure we cancel in the middle of the
# RemoteCall. The second one should never finish.
anchor = collective_ops.all_reduce_v2(
v, group_size=2, group_key=1, instance_key=1)
with ops.control_dependencies([anchor]):
return collective_ops.all_reduce_v2(
v, group_size=2, group_key=1, instance_key=2)
@eager_def_function.function
def run():
with ops.device("/cpu:0"):
return functional_ops.remote_call(
args=[constant_op.constant([1.])] + _remote_fn.captured_inputs,
Tout=[dtypes.float32],
f=_remote_fn,
target="/cpu:1")[0]
async_executor = executor.new_executor(enable_async=True)
cancel_mgr = cancellation.CancellationManager()
with context.executor_scope(async_executor):
# This should never finish.
cancel_mgr.get_cancelable_function(run.get_concrete_function())()
with ops.device("/cpu:0"):
collective_ops.all_reduce_v2([1.],
group_size=2,
group_key=1,
instance_key=1)
cancel_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
async_executor.wait()
@test_util.run_deprecated_v1
def testIf(self):
@function.Defun(dtypes.float32)
def Twice(x):
return x * 2
@function.Defun(dtypes.float32)
def Thrice(x):
return x * 3 + 1
with self.test_session(use_gpu=False) as sess:
x = array_ops.placeholder(dtypes.float32)
ret = functional_ops.If(math_ops.greater(x, 0), [x], Twice, Thrice)[0]
self.assertAllEqual(sess.run(ret, feed_dict={x: 9.}), 18.)
self.assertAllEqual(sess.run(ret, feed_dict={x: -8.}), -23.)
self.assertAllEqual(sess.run(ret, feed_dict={x: 0.}), 1.)
def testWhile(self):
    """functional_ops.While sums 1..n via (counter, accumulator) loop state.

    Runs the same graph on GPU and CPU; results must equal n*(n+1)/2.
    """
    for use_gpu in (True, False):
        with ops.Graph().as_default() as g:
            @function.Defun(*[dtypes.float32] * 2)
            def Cond(n, unused_x):
                return n > 0
            @function.Defun(*[dtypes.float32] * 2)
            def Body(n, x):
                # Count n down while folding it into the accumulator x.
                return n - 1, x + n
            def Run(sess, n):
                # Loop state is [n, 0.]; index 1 is the final accumulator.
                return sess.run(functional_ops.While([n, 0.], Cond, Body))[1]
            with self.session(graph=g, use_gpu=use_gpu) as sess:
                self.assertAllEqual(Run(sess, 20.), 210.)
                self.assertAllEqual(Run(sess, 100.), 5050.)
def testToBool(self):
    """Truthiness semantics of gen_functional_ops.to_bool, table-driven."""
    # 0-D tensors: truthy iff the value is non-"zero"; for strings, "zero"
    # is the empty string.
    scalar_cases = [
        (0, False), (1, True), (42, True),
        (0., False), (1., True), (42., True),
        (False, False), (True, True),
        ("", False), ("a", True),
    ]
    # >0-D tensors: truthy iff the tensor has any elements at all; the
    # element values (even False/0) are irrelevant.
    tensor_cases = [
        ([], False), ([[]], False), ([[[]]], False),
        ([0], True), ([1], True), ([[0]], True),
        ([False], True), ([True], True),
    ]
    for value, expected in scalar_cases + tensor_cases:
        self.assertAllEqual(gen_functional_ops.to_bool(value), expected)
# Like above, but using int32 in order to ensure that int32 tensors don't get
# copied to the GPU during the application of the while.
def testWhileInt32(self):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.int32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.int32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0], Cond, Body))[1]
with self.session(graph=g, use_gpu=True) as sess:
self.assertAllEqual(Run(sess, 20), 210)
self.assertAllEqual(Run(sess, 100), 5050)
@test_util.run_deprecated_v1
def testWhileLowering(self):
def Run(n, fetch_by_name):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
# outputs: [0, n*(n+1)/2]
outputs = functional_ops.While([n, 0.], Cond, Body, name="my_while")
# `outputs` is the list of output tensors of the While op. We
# arbitrarily choose the 0th tensor to get the While op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
if not fetch_by_name:
fetch = outputs[1]
else:
fetch = "my_while:1"
with self.session(graph=g, use_gpu=use_gpu) as sess:
return self.evaluate(fetch)
self.assertAllEqual(Run(20., False), 210.)
self.assertAllEqual(Run(20., True), 210.)
self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.)
@test_util.run_v1_only("b/120545219")
@test_util.disable_xla("b/123337890") # Different error message
def testWhileError(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def CondReturnsTooManyArgs(n, x):
return n > 0, x
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
@function.Defun(*[dtypes.float32] * 2)
def BodyReturnsTooManyArgs(n, x):
return n - 1, x + n, x
with self.session(graph=g, use_gpu=use_gpu):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Expected a single scalar.*got 2 tensors."):
functional_ops.While([5., 0.], CondReturnsTooManyArgs,
Body)[0].eval()
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"While loop body returned 3 arguments. Expected: 2"):
functional_ops.While([5., 0.], Cond,
BodyReturnsTooManyArgs)[0].eval()
def testWhileInMultipleSubgraphs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, x): # pylint: disable=unused-argument
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
with self.session(graph=g, use_gpu=use_gpu) as sess:
n = array_ops.placeholder(dtypes.float32)
_, result = functional_ops.While([n, 0.], Cond, Body)
c = constant_op.constant(37.)
self.assertAllEqual(210., sess.run(result, feed_dict={n: 20.}))
self.assertAllEqual(5050., sess.run(result, feed_dict={n: 100.}))
# Test that the result is the same when we run a different subgraph.
self.assertAllEqual(5050.,
sess.run([result, c], feed_dict={n: 100.})[0])
# pylint: disable=cell-var-from-loop
def testWhileCapturedInputs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
v = variables.Variable(1.0)
def TestCond(n, *args):
del args
return n < 10
@function.Defun(*[dtypes.float32] * 2)
def TestUnary(n, x):
return math_ops.add(n, 1), x + n + v
@function.Defun(*[dtypes.float32] * 3)
def TestBinary(n, x, x2):
return math_ops.add(n, 1), x + n + v, x2 + v
with self.session(graph=g, use_gpu=use_gpu) as sess:
result_unary = functional_ops.While(
[1.0, 0.],
function.Defun(*[dtypes.float32] * 2)(TestCond), TestUnary)
result_binary = functional_ops.While(
[1.0, 0., 0.],
function.Defun(*[dtypes.float32] * 3)(TestCond), TestBinary)
self.evaluate(variables.global_variables_initializer())
assert len(result_unary) == 2
self.assertEqual([10.0, 54.0], self.evaluate(result_unary))
assert len(result_binary) == 3
self.assertEqual([10.0, 54.0, 9.0], self.evaluate(result_binary))
def TestCondCapture(n, *args):
del args
return math_ops.cast(n, dtypes.float32) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
[1],
function.Defun(dtypes.int32)(TestCondCapture),
function.Defun(dtypes.int32, dtypes.float32)(TestUnary))
# pylint: enable=cell-var-from-loop
def _tfSum(self, use_gpu, rewrite_with_while):
    """Sum two ranges with functional_ops.For and check closed-form totals.

    Covers both an ascending range (1..20 -> 210) and a descending range
    with a negative delta (100..1 -> 5050). When rewrite_with_while is True
    the For op is lowered to a While loop instead.
    """
    with ops.Graph().as_default() as g:
        with self.session(graph=g, use_gpu=use_gpu) as sess:
            @function.Defun(dtypes.int32, dtypes.float32)
            def Body(n, x):
                # Accumulate the loop index n into x.
                return x + math_ops.cast(n, dtypes.float32)
            xs = [
                # 1 + 2 + ... + 20
                functional_ops.For(
                    1, 21, 1, [0.], Body, rewrite_with_while=rewrite_with_while)[0],
                # 100 + 99 + ... + 1
                functional_ops.For(
                    100, 0, -1, [0.], Body, rewrite_with_while=rewrite_with_while)
                [0],
            ]
            xvals = self.evaluate(xs)
            self.assertAllEqual(210, xvals[0])
            self.assertAllEqual(5050, xvals[1])
def testFor(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, False)
def testForWithWhile(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, True)
def testForWithWhileNaming(self):
g = ops.Graph()
with g.as_default():
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.cast(n, dtypes.float32)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
names = []
for func in g.as_graph_def().library.function:
names.append(func.signature.name)
self.assertTrue("TestBody" in names)
self.assertTrue("TestBody_Cond" in names)
self.assertTrue("TestBody_Body" in names)
@test_util.run_deprecated_v1
def testForCapturedInputs(self):
v = variables.Variable(1.0)
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.cast(n, dtypes.float32) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.cast(n, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.cast(n, dtypes.float32) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu) as sess:
result_nullary = functional_ops.For(
1, 10, 1, [], TestNullary,
rewrite_with_while=rewrite_with_while)
result_unary = functional_ops.For(
1, 10, 1, [0.], TestUnary,
rewrite_with_while=rewrite_with_while)
result_binary = functional_ops.For(
1, 10, 1, [0., 0.], TestBinary,
rewrite_with_while=rewrite_with_while)
self.evaluate(variables.global_variables_initializer())
assert not result_nullary
# The nullary variant doesn't return anything so we can't easily run it.
# As a total hack, fetch the operation by name and run it.
sess.run(ops.get_default_graph().get_operation_by_name(
"While" if rewrite_with_while else "For"))
assert len(result_unary) == 1
self.assertEqual([54.0], self.evaluate(result_unary))
assert len(result_binary) == 2
self.assertEqual([54.0, 9.0], self.evaluate(result_binary))
def _tfMLP(self, xval, wsval, bsval, rewrite_with_while):
# On GPU, don't rewrite using a while loop.
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu):
@function.Defun(dtypes.int32, *[dtypes.float64] * 3)
def MLP(i, a, ws, bs):
a = math_ops.tanh(math_ops.matmul(a, ws[i, :]) + bs[i, :])
return a, ws, bs
ret = functional_ops.For(
0,
wsval.shape[0],
1, [xval, wsval, bsval],
MLP,
rewrite_with_while=rewrite_with_while)[0]
return self.evaluate(ret)
def _npMLP(self, xval, wsval, bsval):
    """NumPy reference for the layered tanh MLP built with functional_ops.For.

    Applies, for each layer i: x <- tanh(x @ wsval[i] + bsval[i]).
    """
    activation = xval
    for layer in range(wsval.shape[0]):
        pre_activation = np.dot(activation, wsval[layer, :]) + bsval[layer, :]
        activation = np.tanh(pre_activation)
    return activation
def _testForMLP(self, rewrite_with_while):
    """Compare the functional_ops.For MLP against the NumPy reference.

    We construct a 5-layer Multi-Layer Perceptron network here. Each layer
    has the same number of hidden units (3), and the activation function is
    tanh(). We feed the input (xval) with batch size 2.
    """
    xval = np.random.normal(size=(2, 3))
    wsval = np.random.normal(size=(5, 3, 3))
    bsval = np.random.normal(size=(5, 3))
    np_ans = self._npMLP(xval, wsval, bsval)
    tf_for_ans = self._tfMLP(xval, wsval, bsval, rewrite_with_while)
    # allclose (not exact equality): TF and NumPy float paths may differ
    # by rounding.
    self.assertAllClose(np_ans, tf_for_ans)
@test_util.run_deprecated_v1
def testForMLP(self):
self._testForMLP(False)
@test_util.run_deprecated_v1
@test_util.disable_xla(
"Test uses strided slice without compile time constant values")
def testForMLPWhile(self):
self._testForMLP(True)
@test_util.run_v1_only("b/120545219")
def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.cast(i, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):
return v, v
with self.test_session():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Invalid start/limit/delta"):
functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"For loop body returned 2 arguments. Expected: 1"):
functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
@test_util.run_deprecated_v1
def testGradient(self):
@function.Defun(dtypes.float32)
def Poly(x):
# y = 2x^3+3x^2+4x+8
return 2 * x * x * x + 3 * x * x + 4 * x + 8
@function.Defun(dtypes.float32)
def Grad(x):
# dy/dx = dy/dy * dy/dx = 1.0 * (6x^2+6x+4)
return functional_ops.Gradient([x, 1.0], Poly)[0]
with self.test_session(use_gpu=False) as sess:
a = constant_op.constant(0.)
avals = [Poly(a), Grad(a)]
b = constant_op.constant(1.)
bvals = [Poly(b), Grad(b)]
self.assertAllEqual(self.evaluate(avals), [8., 4.])
self.assertAllEqual(self.evaluate(bvals), [17., 16.])
@test_util.run_v2_only
def testCollective(self):
context._reset_context()
logical_devices = []
logical_devices.append(context.LogicalDeviceConfiguration())
logical_devices.append(context.LogicalDeviceConfiguration())
framework_config.set_logical_device_configuration(
framework_config.list_physical_devices("CPU")[0], logical_devices)
@function.Defun(dtypes.float32)
def collective_fn(t):
# Run a dummy collective of group size 1 to test the setup.
return collective_ops.all_reduce_v2(
t, group_size=1, group_key=1, instance_key=1)
@eager_def_function.function
def run():
with ops.device("/cpu:0"):
return functional_ops.remote_call(
args=[constant_op.constant([1.])] + collective_fn.captured_inputs,
Tout=[dtypes.float32],
f=collective_fn,
target="/cpu:1")
self.assertAllEqual(run(), [[1.]])
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
| FunctionalOpsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.