language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 10615,
"end": 11322
} | class ____(TypedDict, total=False):
call_id: Required[str]
"""The unique ID of the apply patch tool call generated by the model."""
status: Required[Literal["completed", "failed"]]
"""The status of the apply patch tool call output. One of `completed` or `failed`."""
type: Required[Literal["apply_patch_call_output"]]
"""The type of the item. Always `apply_patch_call_output`."""
id: Optional[str]
"""The unique ID of the apply patch tool call output.
Populated when this item is returned via API.
"""
output: Optional[str]
"""
Optional human-readable log text from the apply patch tool (e.g., patch results
or errors).
"""
| ApplyPatchCallOutput |
python | pytorch__pytorch | torch/testing/_comparison.py | {
"start": 16719,
"end": 17244
} | class ____(Pair):
"""Pair for ``None`` inputs."""
def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
if not (actual is None or expected is None):
self._inputs_not_supported()
super().__init__(actual, expected, **other_parameters)
def compare(self) -> None:
if not (self.actual is None and self.expected is None):
self._fail(
AssertionError, f"None mismatch: {self.actual} is not {self.expected}"
)
| NonePair |
python | Pylons__pyramid | tests/test_security.py | {
"start": 16863,
"end": 17477
} | class ____:
def __init__(self, result):
self.result = result
def identity(self, request):
return self.result
def authenticated_userid(self, request):
return self.result
def permits(self, request, context, permission):
return self.result
def remember(self, request, userid, **kw):
headers = [(_TEST_HEADER, userid)]
self._header_remembered = headers[0]
return headers
def forget(self, request, **kw):
headers = [(_TEST_HEADER, 'logout')]
self._header_forgotten = headers[0]
return headers
| DummySecurityPolicy |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor.py | {
"start": 44018,
"end": 50116
} | class ____(TensorSpec, trace_type.Serializable):
"""A `TensorSpec` that specifies minimum and maximum values.
Example usage:
```python
spec = tensor_spec.BoundedTensorSpec((1, 2, 3), tf.float32, 0, (5, 5, 5))
tf_minimum = tf.convert_to_tensor(spec.minimum, dtype=spec.dtype)
tf_maximum = tf.convert_to_tensor(spec.maximum, dtype=spec.dtype)
```
Bounds are meant to be inclusive. This is especially important for
integer types. The following spec will be satisfied by tensors
with values in the set {0, 1, 2}:
```python
spec = tensor_spec.BoundedTensorSpec((3, 5), tf.int32, 0, 2)
```
"""
__slots__ = ("_minimum", "_maximum")
def __init__(self, shape, dtype, minimum, maximum, name=None):
"""Initializes a new `BoundedTensorSpec`.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
minimum: Number or sequence specifying the minimum element bounds
(inclusive). Must be broadcastable to `shape`.
maximum: Number or sequence specifying the maximum element bounds
(inclusive). Must be broadcastable to `shape`.
name: Optional string containing a semantic name for the corresponding
array. Defaults to `None`.
Raises:
ValueError: If `minimum` or `maximum` are not provided or not
broadcastable to `shape`.
TypeError: If the shape is not an iterable or if the `dtype` is an invalid
numpy dtype.
"""
super(BoundedTensorSpec, self).__init__(shape, dtype, name)
if minimum is None:
raise ValueError("`minimum` can not be None.")
if maximum is None:
raise ValueError("`maximum` can not be None.")
try:
minimum_shape = np.shape(minimum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(minimum_shape), self.shape)
except ValueError as exception:
raise ValueError(
f"`minimum` {minimum} is not compatible with shape {self.shape}."
) from exception
try:
maximum_shape = np.shape(maximum)
common_shapes.broadcast_shape(
tensor_shape.TensorShape(maximum_shape), self.shape)
except ValueError as exception:
raise ValueError(
f"`maximum` {maximum} is not compatible with shape {self.shape}."
) from exception
self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype)
self._minimum.setflags(write=False)
self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype)
self._maximum.setflags(write=False)
@classmethod
def experimental_type_proto(cls) -> Type[struct_pb2.BoundedTensorSpecProto]:
"""Returns the type of proto associated with BoundedTensorSpec serialization."""
return struct_pb2.BoundedTensorSpecProto
@classmethod
def experimental_from_proto(
cls, proto: struct_pb2.BoundedTensorSpecProto) -> "BoundedTensorSpec":
"""Returns a BoundedTensorSpec instance based on the serialized proto."""
return BoundedTensorSpec(
shape=tensor_shape.TensorShape.experimental_from_proto(proto.shape),
dtype=proto.dtype,
minimum=tensor_util.MakeNdarray(proto.minimum),
maximum=tensor_util.MakeNdarray(proto.maximum),
name=proto.name if proto.name else None)
def experimental_as_proto(self) -> struct_pb2.BoundedTensorSpecProto:
"""Returns a proto representation of the BoundedTensorSpec instance."""
return struct_pb2.BoundedTensorSpecProto(
shape=self.shape.experimental_as_proto(),
dtype=self.dtype.experimental_as_proto().datatype,
minimum=tensor_util.make_tensor_proto(self._minimum),
maximum=tensor_util.make_tensor_proto(self._maximum),
name=self.name)
@classmethod
def from_spec(cls, spec):
"""Returns a `TensorSpec` with the same shape and dtype as `spec`.
If `spec` is a `BoundedTensorSpec`, then the new spec's bounds are set to
`spec.minimum` and `spec.maximum`; otherwise, the bounds are set to
`spec.dtype.min` and `spec.dtype.max`.
>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name="x")
>>> BoundedTensorSpec.from_spec(spec)
BoundedTensorSpec(shape=(8, 3), dtype=tf.int32, name='x',
minimum=array(-2147483648, dtype=int32),
maximum=array(2147483647, dtype=int32))
Args:
spec: The `TypeSpec` used to create the new `BoundedTensorSpec`.
"""
dtype = dtypes.as_dtype(spec.dtype)
minimum = getattr(spec, "minimum", dtype.min)
maximum = getattr(spec, "maximum", dtype.max)
return BoundedTensorSpec(spec.shape, dtype, minimum, maximum, spec.name)
@property
def minimum(self):
"""Returns a NumPy array specifying the minimum bounds (inclusive)."""
return self._minimum
@property
def maximum(self):
"""Returns a NumPy array specifying the maximum bounds (inclusive)."""
return self._maximum
def cast(self, value, casting_context):
if casting_context.allow_specs and isinstance(value, BoundedTensorSpec):
assert value.is_subtype_of(self), f"Can not cast {value!r} to {self!r}"
return self
actual_spec = TensorSpec(shape=self.shape, dtype=self.dtype, name=self.name)
return actual_spec.cast(value, casting_context) # pylint: disable=protected-access
def __repr__(self):
s = "BoundedTensorSpec(shape={}, dtype={}, name={}, minimum={}, maximum={})"
return s.format(self.shape, repr(self.dtype), repr(self.name),
repr(self.minimum), repr(self.maximum))
def __eq__(self, other):
tensor_spec_eq = super(BoundedTensorSpec, self).__eq__(other)
return (tensor_spec_eq and np.allclose(self.minimum, other.minimum) and
np.allclose(self.maximum, other.maximum))
def __hash__(self):
return hash((self._shape, self.dtype))
def __reduce__(self):
return BoundedTensorSpec, (self._shape, self._dtype, self._minimum,
self._maximum, self._name)
def _serialize(self):
return (self._shape, self._dtype, self._minimum, self._maximum, self._name)
| BoundedTensorSpec |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 25284,
"end": 25495
} | class ____(BaseModel):
"""Get the number of (mapped) XCom values available."""
key: str
dag_id: str
run_id: str
task_id: str
type: Literal["GetNumberXComs"] = "GetNumberXComs"
| GetXComCount |
python | kubernetes-client__python | kubernetes/client/models/v1_flow_distinguisher_method.py | {
"start": 383,
"end": 3798
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str'
}
attribute_map = {
'type': 'type'
}
def __init__(self, type=None, local_vars_configuration=None): # noqa: E501
"""V1FlowDistinguisherMethod - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self.discriminator = None
self.type = type
@property
def type(self):
"""Gets the type of this V1FlowDistinguisherMethod. # noqa: E501
`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required. # noqa: E501
:return: The type of this V1FlowDistinguisherMethod. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1FlowDistinguisherMethod.
`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required. # noqa: E501
:param type: The type of this V1FlowDistinguisherMethod. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1FlowDistinguisherMethod):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1FlowDistinguisherMethod):
return True
return self.to_dict() != other.to_dict()
| V1FlowDistinguisherMethod |
python | walkccc__LeetCode | solutions/1720. Decode XORed Array/1720.py | {
"start": 0,
"end": 168
} | class ____:
def decode(self, encoded: list[int], first: int) -> list[int]:
ans = [first]
for e in encoded:
ans.append(e ^ ans[-1])
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-nomic/llama_index/embeddings/nomic/base.py | {
"start": 821,
"end": 926
} | class ____(str, Enum):
REMOTE = "remote"
LOCAL = "local"
DYNAMIC = "dynamic"
| NomicInferenceMode |
python | h5py__h5py | h5py/tests/test_selections.py | {
"start": 692,
"end": 2224
} | class ____(BaseSelection):
"""
Internal feature: Determine output types from dataset dtype and fields.
"""
def test_simple(self):
""" Non-compound types are handled appropriately """
dt = np.dtype('i')
out, format = sel2.read_dtypes(dt, ())
self.assertEqual(out, format)
self.assertEqual(out, np.dtype('i'))
def test_simple_fieldexc(self):
""" Field names for non-field types raises ValueError """
dt = np.dtype('i')
with self.assertRaises(ValueError):
out, format = sel2.read_dtypes(dt, ('a',))
def test_compound_simple(self):
""" Compound types with elemental subtypes """
dt = np.dtype( [('a','i'), ('b','f'), ('c','|S10')] )
# Implicit selection of all fields -> all fields
out, format = sel2.read_dtypes(dt, ())
self.assertEqual(out, format)
self.assertEqual(out, dt)
# Explicit selection of fields -> requested fields
out, format = sel2.read_dtypes(dt, ('a','b'))
self.assertEqual(out, format)
self.assertEqual(out, np.dtype( [('a','i'), ('b','f')] ))
# Explicit selection of exactly one field -> no fields
out, format = sel2.read_dtypes(dt, ('a',))
self.assertEqual(out, np.dtype('i'))
self.assertEqual(format, np.dtype( [('a','i')] ))
# Field does not appear in named typed
with self.assertRaises(ValueError):
out, format = sel2.read_dtypes(dt, ('j', 'k'))
| TestTypeGeneration |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_test.py | {
"start": 97818,
"end": 117832
} | class ____(test.TestCase, parameterized.TestCase):
def test_load_with_tags(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Cpp bindings do not support Tags.")
root = autotrackable.AutoTrackable()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with self.assertRaises(ValueError):
load.load(path, tags=[tag_constants.EVAL])
load.load(path, tags=[tag_constants.SERVING])
load.load(path, tags=tag_constants.SERVING)
load.load(path, tags=set([tag_constants.SERVING]))
def test_save_load_contains_with_fspath(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Cpp bindings cannot work with pathlib object.")
root = autotrackable.AutoTrackable()
path = pathlib.Path(tempfile.mkdtemp(prefix=self.get_temp_dir()))
save.save(root, path)
self.assertTrue(loader_impl.contains_saved_model(path))
test_load(path, use_cpp_bindings=use_cpp_bindings)
def test_single_restore_op_used(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = module.Module()
root.v1 = variables.Variable(1.0)
root.v2 = variables.Variable(2.0)
root.v3 = variables.Variable(3.0)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
restore_count = 0
def _count_restores(op_type, *unused_args, **unused_kwargs):
nonlocal restore_count
if op_type == b"RestoreV2":
restore_count += 1
op_callbacks.add_op_callback(_count_restores)
save.save(root, path)
test_load(path, use_cpp_bindings=use_cpp_bindings)
op_callbacks.remove_op_callback(_count_restores)
self.assertEqual(1, restore_count)
def test_docstring_examples(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
exported = checkpoint.Checkpoint(v=variables.Variable(3.0))
exported.f = def_function.function(
lambda x: exported.v * x,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
],
)
save.save(exported, path)
imported = test_load(path)
self.assertEqual(3.0, imported.v.numpy())
self.assertEqual(6.0, imported.f(x=constant_op.constant(2.0)).numpy())
save.save(exported, path, exported.f.get_concrete_function())
imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
f = imported.signatures["serving_default"]
self.assertAllEqual(
[[-3.0]], f(x=constant_op.constant([[-1.0]]))["output_0"].numpy()
)
def test_object_with_extra_dependencies(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class Extra(autotrackable.AutoTrackable):
def _trackable_children(self, save_type, **kwargs):
children = super(Extra, self)._trackable_children(save_type, **kwargs)
children["a"] = variables.Variable(5.0)
return children
root = Extra()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = test_load(path)
self.assertEqual(5, self.evaluate(imported.a))
def test_save_cached_variable(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
with ops.Graph().as_default(), session_lib.Session() as session:
obj = autotrackable.AutoTrackable()
obj.v = variables.Variable(2.0, caching_device=lambda op: op.device)
obj.w = variables.Variable(3.0)
session.run([obj.v.initializer, obj.w.initializer])
@def_function.function
def total():
return obj.v + obj.w
@def_function.function(input_signature=[tensor_spec.TensorSpec([])])
def wrapped_total(x):
return total() + x
@def_function.function
def increment_v(x):
obj.v.assign_add(x)
return x
session.run(increment_v(constant_op.constant(3.0))) # generate signatures
self.assertAllClose(8, total())
self.assertAllClose(13, wrapped_total(constant_op.constant(5.0)))
obj.total = total
obj.wrapped_total = wrapped_total.get_concrete_function()
obj.increment_v = increment_v
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(obj, save_dir, signatures=total.get_concrete_function())
imported = test_load(save_dir)
session.run(variables.global_variables_initializer())
self.assertAllClose(8, imported.total())
session.run(imported.increment_v(4))
self.assertAllClose(12, imported.total())
self.assertAllClose(15, imported.wrapped_total(constant_op.constant(3.0)))
self.assertAllClose(
{"output_0": 12}, imported.signatures["serving_default"]()
)
# Try loading and running the function in eager mode
imported = test_load(save_dir)
self.assertAllClose(8, imported.total())
imported.increment_v(5)
self.assertAllClose(13, imported.total())
self.assertAllClose(13.5, imported.wrapped_total(constant_op.constant(0.5)))
self.assertAllClose(
{"output_0": 13}, imported.signatures["serving_default"]()
)
# TODO(allenl, kkb): Use the new memory checker here once it's fast enough (3
# iterations took hundreds of seconds). It would be really nice to check
# allocations at a lower level.
@test_util.assert_no_new_pyobjects_executing_eagerly()
def test_functions_cleaned(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
if sys.version_info.major < 3:
self.skipTest("Not working in Python 2")
root = module.Module()
root.v = variables.Variable(1.0)
root.f = def_function.function(
lambda x: x + root.v,
input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.float32)
],
)
cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
def test_load_partial_object(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = module.Module()
root.variables_holder = module.Module()
root.variables_holder.v = variables.Variable(1.0)
class Adder(module.Module):
@def_function.function(input_signature=[tensor_spec.TensorSpec(shape=[])])
def __call__(self, y):
root.variables_holder.v.assign_add(y)
return 1
root.adder = Adder()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
imported = load.load_partial(
save_dir, ["root.variables_holder.v", "root.adder"]
)
v = imported["root.variables_holder.v"]
adder = imported["root.adder"]
self.assertEqual(self.evaluate(v), 1)
adder(5)
self.assertEqual(self.evaluate(v), 6)
with self.assertRaisesRegex(
ValueError, "does not include all required objects for loading"
):
imported = load.load_partial(save_dir, ["root.adder"])
def test_load_partial_checkpoint(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = module.Module()
root.variables_holder = module.Module()
root.variables_holder.v = variables.Variable(1.0)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
loaded = module.Module()
loaded.v = variables.Variable(2.0)
load.load_partial(
save_dir,
{"root": loaded},
options=load_options.LoadOptions(allow_partial_checkpoint=True),
)
self.assertEqual(loaded.variables_holder.v.numpy(), 1)
with self.assertRaisesRegex(AssertionError, "were not bound"):
load.load_partial(save_dir, {"root": loaded})
def test_call_untraced_function_raises_error(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class ObjWithFunction(module.Module):
@def_function.function
def foo(self, a):
return a
root = ObjWithFunction()
with self.assertLogs(level="INFO") as logs:
loaded = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
expected_save_message = (
"INFO:absl:Found untraced functions such as foo while saving "
"(showing 1 of 1). These functions will not be directly callable after "
"loading."
)
self.assertIn(expected_save_message, logs.output)
with self.assertRaisesRegex(
ValueError, "Found zero restored functions for caller function."
):
loaded.foo(1)
def test_restored_function_execute_eagerly(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
try:
def_function.run_functions_eagerly(True)
class MyModel(module.Module):
@def_function.function
def __call__(self, inputs, training=False):
return math_ops.multiply(0.5, inputs)
model = MyModel()
model.__call__.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.float32)
)
loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
# Calling the function should not throw an exception.
loaded(constant_op.constant([1.0]))
finally:
def_function.run_functions_eagerly(False)
def test_restored_model_concrete_function_is_deterministic(
self, use_cpp_bindings
):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
previous_concrete_function = None
for _ in range(100):
class MyModel(module.Module):
@def_function.function
def __call__(self, x):
return x * constant_op.constant(3.0)
model = MyModel()
model(array_ops.ones((7, 3), dtype=dtypes.float32))
model.__call__.get_concrete_function(
tensor_spec.TensorSpec([None, 3], dtypes.float32)
)
loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
# Ensure the newly loaded concrete function is the same as the previous
# after a cycle of serialization / deserialization.
new_concrete_function = loaded.__call__.get_concrete_function(
tensor_spec.TensorSpec([None, 3], dtypes.float32)
)
if previous_concrete_function is not None:
self.assertEqual(
previous_concrete_function.pretty_printed_signature(),
new_concrete_function.pretty_printed_signature(),
)
previous_concrete_function = new_concrete_function
def test_garbage_collection_capturable_resource_doesnt_raise_exception(
self, use_cpp_bindings
):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
model = module.Module()
model.mapping = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(
keys=math_ops.range(1, dtype=dtypes.int32), values=["foo"]
),
"default_value",
)
loaded = cycle(model, 1, use_cpp_bindings=use_cpp_bindings)
del model
del loaded
# Exceptions raised during garbage collection are simply printed to stderr
# and ignored, and we have no way to access them. We'll capture stdout
# during the garbage collection process and inspect to see if any
# exceptions were raised.
stderr = io.StringIO()
with contextlib.redirect_stderr(stderr):
gc.collect()
if "Exception ignored in" in stderr.getvalue():
raise Exception(stderr.getvalue())
def test_captured_dataset_with_asset(self, use_cpp_bindings):
# TODO(b/264869753) Fix SingleCycleTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class HasDataset(module.Module):
def __init__(self, temp_dir, file_name):
super(HasDataset, self).__init__()
file = os.path.join(temp_dir, file_name)
with tf_record.TFRecordWriter(file, "GZIP") as f:
for v in ["a", "aa", "aaa"]:
f.write(str(v))
self.dataset = readers.TFRecordDataset([file], compression_type="GZIP")
@def_function.function
def __call__(self, x):
current_sum = array_ops.zeros([], dtype=dtypes.int32)
for element in self.dataset:
current_sum += x * string_ops.string_length(element)
return current_sum
temp_dir = self.get_temp_dir()
file_name = "tf_record_asset.tfrecord.gz"
root = HasDataset(temp_dir, file_name)
self.assertEqual(
18, # 3 * (1 + 2 + 3)
root(constant_op.constant(3, dtype=dtypes.int32)).numpy(),
)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir)
file_io.delete_file(os.path.join(temp_dir, file_name))
asset_path = os.path.join(save_dir, "assets/{}".format(file_name))
self.assertTrue(file_io.file_exists(asset_path))
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
loaded = test_load(load_dir, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(
18, # 3 * (1 + 2 + 3)
loaded(constant_op.constant(3, dtype=dtypes.int32)).numpy(),
)
def test_function_aliases(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.f = def_function.function(
lambda x: 2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(function_aliases={
"my_func": root.f,
})
save.save(root, save_dir, root.f, options=options)
loaded = test_load(
save_dir,
use_cpp_bindings=use_cpp_bindings,
options=load_options.LoadOptions(
experimental_load_function_aliases=True
),
)
self.assertLen(loaded.function_aliases, 1)
self.assertIn("my_func", loaded.function_aliases)
self.assertEqual(loaded.function_aliases["my_func"](1.0).numpy(), 2.0)
def test_function_aliases_with_non_saved_function(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# `f` below will be aliased but not saved because is not tracked
f = def_function.function(lambda x: 2 * x)
root = autotrackable.AutoTrackable()
root.g = def_function.function(lambda x: 2 * f(x))
# Create two traces
root.g(constant_op.constant(1))
root.g(constant_op.constant(1.0, dtype=dtypes.float32))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(
function_aliases={
"my_func": f,
}
)
save.save(root, save_dir, options=options)
loaded = test_load(
save_dir,
use_cpp_bindings=use_cpp_bindings,
options=load_options.LoadOptions(
experimental_load_function_aliases=True
),
)
self.assertLen(loaded.function_aliases, 1)
self.assertIn("my_func", loaded.function_aliases)
self.assertLen(loaded.function_aliases["my_func"], 2)
self.assertIsInstance(
loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
)
self.assertIsInstance(
loaded.function_aliases["my_func"][1], types_core.ConcreteFunction
)
@unittest.skip("skip until unexpected retracing is fixed/handled b/280121368")
def test_function_aliases_with_concrete_function(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# `f` below will be aliased but not saved because is not tracked
f = def_function.function(lambda x: 2 * x)
root = autotrackable.AutoTrackable()
root.g = def_function.function(lambda x: 2 * f(x))
# Create two traces
root.g(constant_op.constant(1))
root.g(constant_op.constant(1.0, dtype=dtypes.float32))
self.assertLen(f._list_all_concrete_functions(), 2)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(
function_aliases={
"my_func": f.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32)
),
}
)
self.assertLen(f._list_all_concrete_functions(), 2)
save.save(root, save_dir, options=options)
loaded = test_load(
save_dir,
use_cpp_bindings=use_cpp_bindings,
options=load_options.LoadOptions(
experimental_load_function_aliases=True
),
)
self.assertLen(loaded.function_aliases, 1)
self.assertIn("my_func", loaded.function_aliases)
self.assertLen(loaded.function_aliases["my_func"], 1)
self.assertIsInstance(
loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
)
@unittest.skip("skip until unexpected retracing is fixed/handled b/280121368")
def test_function_aliases_with_concrete_functions(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# `f` below will be aliased but not saved because is not tracked
f = def_function.function(lambda x: 2 * x)
root = autotrackable.AutoTrackable()
root.g = def_function.function(lambda x: 2 * f(x))
# Create 3 traces for g, which will in turn create 3 traces for f.
root.g(x=constant_op.constant(1))
root.g(x=constant_op.constant(1.0, dtype=dtypes.float32))
root.g(x=constant_op.constant(1.0, dtype=dtypes.float16))
self.assertLen(f._list_all_concrete_functions(), 3)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(
function_aliases={
# Alias 2 out of 3 traces of f
"my_func": [
f.get_concrete_function(
x=tensor_spec.TensorSpec([], dtypes.int32)
),
f.get_concrete_function(
x=tensor_spec.TensorSpec([], dtypes.float32)
),
],
}
)
self.assertLen(f._list_all_concrete_functions(), 3)
save.save(root, save_dir, options=options)
loaded = test_load(
save_dir,
use_cpp_bindings=use_cpp_bindings,
options=load_options.LoadOptions(
experimental_load_function_aliases=True
),
)
self.assertLen(loaded.function_aliases, 1)
self.assertIn("my_func", loaded.function_aliases)
self.assertLen(loaded.function_aliases["my_func"], 2)
self.assertIsInstance(
loaded.function_aliases["my_func"][0], types_core.ConcreteFunction
)
self.assertIsInstance(
loaded.function_aliases["my_func"][1], types_core.ConcreteFunction
)
def test_function_aliases_name_collision(self, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.function_aliases = variables.Variable(1.0)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(function_aliases={
"my_func": root.f,
})
save.save(root, save_dir, root.f, options=options)
with self.assertRaisesRegex(
ValueError, "Could not load with experimental_load_function_aliases"
):
test_load(
save_dir,
use_cpp_bindings=use_cpp_bindings,
options=load_options.LoadOptions(
experimental_load_function_aliases=True
),
)
# TODO(b/264882754) Support Cpp bindings DeferredInitModuleVariablesTest
| SingleCycleTests |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 2974,
"end": 3440
} | class ____:
"""Test sk_SK bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{20}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == SkSKBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{20}", iban[2:])
| TestSkSk |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 33222,
"end": 34148
} | class ____(state_types.Transform):
"""Bitcasts the underlying ref at the given offset to the given shape and dtype."""
dtype: dtypes.DType
shape: tuple[int, ...]
offset: int
# TMEM-specific params
layout: tcgen05.TMEMLayout | None
@classmethod
def from_transformed_ref(
cls,
ref: pallas_core.TransformedRef,
byte_offset: int,
layout: tcgen05.TMEMLayout | None = None,
):
return cls(dtypes.dtype(ref.dtype), ref.ref.shape, byte_offset, layout)
def transform_shape(self, shape):
if shape is None:
return None
return self.shape
def transform_dtype(self, dtype):
del dtype # Unused.
return self.dtype
def tree_flatten(self):
return (), (self.dtype, self.shape, self.offset, self.layout)
@classmethod
def tree_unflatten(cls, metadata, arrays):
assert not arrays
return cls(*metadata)
@dataclasses.dataclass(frozen=True)
| ExtractAliasedRef |
python | getlogbook__logbook | src/logbook/handlers.py | {
"start": 11090,
"end": 11531
} | class ____(Handler):
"""A handler that does nothing.
Useful to silence logs above a certain location in the handler stack::
handler = NullHandler()
handler.push_application()
NullHandlers swallow all logs sent to them, and do not bubble them onwards.
"""
blackhole = True
def __init__(self, level=NOTSET, filter=None):
super().__init__(level=level, filter=filter, bubble=False)
| NullHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple16.py | {
"start": 462,
"end": 539
} | class ____(Base[int, str]): ...
Child.method1(1, "")
Child.method2(1, "")
| Child |
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_binary_operation.py | {
"start": 1409,
"end": 1738
} | class ____(A3):
def __radd__(self, other):
return NotImplemented
A3() + B3() # [unsupported-binary-operation]
# Augmented
FFF = 1
FFF += A() # [unsupported-binary-operation]
TTT = 1
TTT += [] # [unsupported-binary-operation]
# Don't emit for this case since we don't know what unknown is.
from unknown import Unknown
| B3 |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 2853,
"end": 3529
} | class ____:
"""Since the serializer is replacing the `jina_pb2 to know how to exactly serialize messages, this is just a placeholder that
delegates the serializing and deserializing to the internal protobuf structure with no extra optimization.
"""
@staticmethod
def SerializeToString(x):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
return x.SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
sp = jina_pb2.StatusProto()
sp.ParseFromString(x)
return sp
| StatusProto |
python | openai__openai-python | src/openai/types/webhooks/response_incomplete_webhook_event.py | {
"start": 327,
"end": 782
} | class ____(BaseModel):
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response was interrupted."""
data: Data
"""Event data payload."""
type: Literal["response.incomplete"]
"""The type of the event. Always `response.incomplete`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| ResponseIncompleteWebhookEvent |
python | realpython__materials | wordcount/tests/fixtures.py | {
"start": 1463,
"end": 5603
} | class ____:
files: list[FakeFile]
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
@cached_property
def paths(self):
return [str(file.path) for file in self.files]
@cached_property
def expected(self):
if len(self.files) > 1:
return self.file_lines + self.total_line
else:
return self.file_lines
@cached_property
def file_lines(self):
return b"".join(file.format_line() for file in self.files)
@cached_property
def total_line(self):
totals = [sum(file.counts[i] for file in self.files) for i in range(4)]
md = len(str(max(totals)))
return f"{totals[0]:{md}} {totals[1]:{md}} {totals[3]:{md}} total\n".encode(
"utf-8"
)
@pytest.fixture(scope="session")
def small_file():
temp_file = TempFile(content=b"caffe\n", counts=(1, 1, 6, 6))
try:
yield temp_file
finally:
temp_file.delete()
@pytest.fixture(scope="session")
def big_file():
temp_file = TempFile(
content=(
b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\n"
b"tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\n"
b"quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\n"
b"consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\n"
b"cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\n"
b"proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
),
counts=(6, 69, 447, 447),
)
try:
yield temp_file
finally:
temp_file.delete()
@pytest.fixture(scope="session")
def file1():
temp_file = TempFile(content=b"caffe latte\n", counts=(1, 2, 12, 12))
try:
yield temp_file
finally:
temp_file.delete()
@pytest.fixture(scope="session")
def file2():
temp_file = TempFile(
content=b"Lorem ipsum dolor sit amet\n", counts=(1, 5, 27, 27)
)
try:
yield temp_file
finally:
temp_file.delete()
@pytest.fixture(scope="session")
def unicode_file():
temp_file = TempFile(
content="Zażółć gęślą jaźń\n".encode("utf-8"), counts=(1, 3, 18, 27)
)
try:
yield temp_file
finally:
temp_file.delete()
@pytest.fixture(scope="session")
def small_files():
temp_files = [
TempFile(content=b"Mocha", counts=(0, 1, 5, 5)),
TempFile(content=b"Espresso\n", counts=(1, 1, 9, 9)),
TempFile(content=b"Cappuccino\n", counts=(1, 1, 11, 11)),
TempFile(content=b"Frappuccino", counts=(0, 1, 11, 11)),
TempFile(content=b"Flat White\n", counts=(1, 2, 11, 11)),
TempFile(content=b"Turkish Coffee", counts=(0, 2, 14, 14)),
TempFile(content=b"Irish Coffee Drink\n", counts=(1, 3, 19, 19)),
TempFile(content=b"Espresso con Panna", counts=(0, 3, 18, 18)),
]
try:
yield Files(temp_files)
finally:
for file in temp_files:
file.delete()
@pytest.fixture(scope="session")
def medium_files(file1, file2, unicode_file):
return Files([file1, file2, unicode_file])
@pytest.fixture(scope="session")
def wc():
def function(*args, stdin: bytes | None = None) -> bytes:
process = run(["wordcount", *args], capture_output=True, input=stdin)
return process.stdout
return function
@pytest.fixture(scope="session")
def fake_dir():
with TemporaryDirectory(delete=False) as directory:
path = Path(directory)
try:
yield path
finally:
path.rmdir()
@pytest.fixture(scope="function")
def random_name():
return make_random_name()
def make_random_name(length=10):
return "".join(random.choices(string.ascii_lowercase, k=length))
@pytest.fixture(scope="session")
def runner(wc, small_file, unicode_file, big_file, fake_dir):
return Runner(
wc, small_file, unicode_file, big_file, fake_dir, make_random_name()
)
@dataclass
| Files |
python | conda__conda | conda/core/path_actions.py | {
"start": 37697,
"end": 38213
} | class ____(PrefixPathAction, metaclass=ABCMeta):
def __init__(
self, transaction_context, linked_package_data, target_prefix, target_short_path
):
super().__init__(transaction_context, target_prefix, target_short_path)
self.linked_package_data = linked_package_data
def verify(self):
# inability to remove will trigger a rollback
# can't definitely know if path can be removed until it's attempted and failed
self._verified = True
| RemoveFromPrefixPathAction |
python | getsentry__sentry | src/sentry/tempest/serializers.py | {
"start": 1487,
"end": 2462
} | class ____(serializers.ModelSerializer):
clientId = serializers.CharField(source="client_id")
clientSecret = serializers.CharField(source="client_secret")
message = serializers.CharField(read_only=True)
messageType = serializers.CharField(source="message_type", read_only=True)
latestFetchedItemId = serializers.CharField(source="latest_fetched_item_id", read_only=True)
createdById = serializers.CharField(source="created_by_id", read_only=True)
dateAdded = serializers.DateTimeField(source="date_added", read_only=True)
dateUpdated = serializers.DateTimeField(source="date_updated", read_only=True)
class Meta:
model = TempestCredentials
fields = [
"id",
"clientId",
"clientSecret",
"message",
"messageType",
"latestFetchedItemId",
"createdById",
"dateAdded",
"dateUpdated",
]
| DRFTempestCredentialsSerializer |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 6794,
"end": 6946
} | class ____(BaseModel):
model_config = ConfigDict(populate_by_name=True)
my_field: str = Field(alias='my_alias')
m5 = Model5(my_field='foo')
| Model5 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs.py | {
"start": 2172,
"end": 2208
} | class ____:
foo: int = 1
@define
| D |
python | google__pytype | pytype_extensions/__init__.py | {
"start": 1471,
"end": 5187
} | class ____(Protocol[T]):
"""Protocol that matches any dataclass (or instance thereof).
Can be used to match any dataclass. Example (modulo pytype bugs):
@dataclasses.dataclass
class Foo:
x: str
y: int
@dataclasses.dataclass
class Bar:
x: str
y: str
class Baz:
x: str
y: int
def foo(item: Dataclass):
pass
def bar(item: Dataclass[str]):
pass
def baz(item: Dataclass[Union[int, str]]):
pass
foo(Foo(x='yes', y=1)) # ok
foo(Bar(x='yes', y='no')) # ok
foo(Baz(x='yes', y=1)) # error, not a dataclass
bar(Foo(x='yes', y=1)) # error, has a non-str field
bar(Bar(x='yes', y='no')) # ok
bar(Baz(x='yes', y=1)) # error, not a dataclass
baz(Foo(x='yes', y=1)) # ok
baz(Bar(x='yes', y='no')) # ok
baz(Baz(x='yes', y=1)) # error, not a dataclass
The only way to identify a dataclass is to test for the presence of the
`__dataclass_fields__` member; that is what `dataclasses.is_dataclass` uses:
https://github.com/python/cpython/blob/3.7/Lib/dataclasses.py#L1036.
"""
__dataclass_fields__: Dict[str, 'dataclasses.Field[T]']
if typing.TYPE_CHECKING:
_GenericCallable = TypeVar('_GenericCallable', bound=Callable[..., Any])
class Decorator:
"""A type annotation for decorators that do not change signatures.
This is a stand-in for using `Callable[[T], T]` to represent a decorator.
Given a decorator function, which takes in a callable and returns a callable
with the same signature, apply this class as a decorator to that function.
This can also be used for decorator factories.
Examples:
Plain decorator (decorator matches Callable[[T], T]):
>>> @pytype_extensions.Decorator
... def MyDecorator(func):
... def wrapper(...):
... ...
... return wrapper
Decorator factory (factory matches Callable[..., Callable[[T], T]]):
>>> def MyDecoratorFactory(foo: int) -> pytype_extensions.Decorator:
... @pytype_extensions.Decorator
... def MyDecorator(func):
... def Wrapper(*args, **kwargs):
... return func(foo, *args, **kwargs)
... return Wrapper
... return MyDecorator
Note for the above example: the return type annotation (first line) is the
most important one; it indicates to callers that MyDecoratorFactory is
returning a decorator. The "@pytype_extensions.Decorator" annotation (second
line) indicates to pytype that MyDecorator is a Decorator; without it, you
would need to add "pytype: disable=bad-return-type" on the final line.
This class only exists at build time, for typechecking. At runtime, the
'Decorator' member of this module is a simple identity function (see below).
More information: pytype-decorators
Shortlink: pytype_extensions.Decorator
"""
# pylint: disable=line-too-long, unused-argument
def __init__(self, decorator: Callable[[_GenericCallable], _GenericCallable]):
...
def __call__(self, func: _GenericCallable) -> _GenericCallable:
... # pytype: disable=bad-return-type
else:
# At runtime, Decorator is a simple identity function that returns its input.
Decorator = lambda d: d
def assert_type(__x, __t) -> None: # pylint: disable=invalid-name
"""Prevent runtime errors from assert_type statements.
assert_type is handled internally by pytype at type-checking time; it should
do nothing at runtime.
Usage example:
```
import pytype_extensions
assert_type = pytype_extensions.assert_type
x = 3
assert_type(x, int)
```
Args:
__x: The object to make the type assertion about.
__t: The type we want to assert.
"""
del __x, __t # Unused.
| Dataclass |
python | getsentry__sentry | src/sentry/sentry_metrics/indexer/base.py | {
"start": 14267,
"end": 18674
} | class ____(Service):
"""
Provides integer IDs for metric names, tag keys and tag values
and the corresponding reverse lookup.
Check `sentry.snuba.metrics` for convenience functions.
"""
__all__ = (
"record",
"resolve",
"reverse_resolve",
"bulk_record",
"resolve_shared_org",
"reverse_shared_org_resolve",
"bulk_reverse_resolve",
)
def bulk_record(
self, strings: Mapping[UseCaseID, Mapping[OrgId, set[str]]]
) -> UseCaseKeyResults:
"""
Takes in a mapping with use case IDs mapped to Org IDs mapped to set of strings.
Ultimately returns a mapping of those use case IDs mapped to Org IDs mapped to
string -> ID mapping, for each string in the each set.
There are three steps to getting the ids for strings:
0. ids from static strings (StaticStringIndexer)
1. ids from cache (CachingIndexer)
2. ids from existing db records (postgres/spanner)
3. ids that have been rate limited (postgres/spanner)
4. ids from newly created db records (postgres/spanner)
Each step will start off with a UseCaseKeyCollection and UseCaseKeyResults:
keys = UseCaseKeyCollection(mapping)
results = UseCaseKeyResults()
Then the work to get the ids (either from cache, db, etc)
.... # work to add results to UseCaseKeyResults()
Those results will be added to `mapped_results` which can
be retrieved
results.get_mapped_results()
Remaining unmapped keys get turned into a new
UseCaseKeyCollection for the next step:
new_keys = results.get_unmapped_keys(mapping)
When the last step is reached or a step resolves all the remaining
unmapped keys the key_results objects are merged and returned:
e.g. return cache_key_results.merge(db_read_key_results)
"""
raise NotImplementedError()
def record(self, use_case_id: UseCaseID, org_id: int, string: str) -> int | None:
"""Store a string and return the integer ID generated for it
With every call to this method, the lifetime of the entry will be
prolonged.
"""
raise NotImplementedError()
@metric_path_key_compatible_resolve
def resolve(self, use_case_id: UseCaseID, org_id: int, string: str) -> int | None:
"""Lookup the integer ID for a string.
Does not affect the lifetime of the entry.
This function is backwards compatible with UseCaseKey while call sites that still uses
UseCaseKey are being cleaned up, but callers should always use UseCaseID from now on.
Returns None if the entry cannot be found.
"""
raise NotImplementedError()
@metric_path_key_compatible_rev_resolve
def reverse_resolve(self, use_case_id: UseCaseID, org_id: int, id: int) -> str | None:
"""Lookup the stored string for a given integer ID.
This function is backwards compatible with UseCaseKey while call sites that still uses
UseCaseKey are being cleaned up, but callers should always use UseCaseID from now on.
Returns None if the entry cannot be found.
"""
raise NotImplementedError()
def bulk_reverse_resolve(
self, use_case_id: UseCaseID, org_id: int, ids: Collection[int]
) -> Mapping[int, str]:
"""Lookup the stored strings for multiple ids belonging to the same use-case and organization.
Returns a mapping between the ids and their corresponding string values.
If an id can't be mapped the return value will not contain any entry for this id.
"""
raise NotImplementedError()
def resolve_shared_org(self, string: str) -> int | None:
"""
Look up the index for a shared (cross organization) string.
Typically, this function will only lookup strings that are statically defined but
regardless of the mechanism these are strings that are not organization or use-case specific.
"""
raise NotImplementedError()
def reverse_shared_org_resolve(self, id: int) -> str | None:
"""Lookup the stored string given integer for a shared (cross organization) ID.
Returns None if the entry cannot be found.
"""
raise NotImplementedError()
| StringIndexer |
python | spack__spack | lib/spack/spack/mirrors/layout.py | {
"start": 812,
"end": 2242
} | class ____(MirrorLayout):
def __init__(self, alias_path: str, digest_path: Optional[str] = None) -> None:
# When we have a digest, it is used as the primary storage location. If not, then we use
# the human-readable alias. In case of mirrors of a VCS checkout, we currently do not have
# a digest, that's why an alias is required and a digest optional.
super().__init__(path=digest_path or alias_path)
self.alias = alias_path
self.digest_path = digest_path
def make_alias(self, root: str) -> None:
"""Symlink a human readable path in our mirror to the actual storage location."""
# We already use the human-readable path as the main storage location.
if not self.digest_path:
return
alias, digest = os.path.join(root, self.alias), os.path.join(root, self.digest_path)
alias_dir = os.path.dirname(alias)
relative_dst = os.path.relpath(digest, start=alias_dir)
mkdirp(alias_dir)
tmp = f"{alias}.tmp"
symlink(relative_dst, tmp)
try:
os.rename(tmp, alias)
except OSError:
# Clean up the temporary if possible
try:
os.unlink(tmp)
except OSError:
pass
raise
def __iter__(self):
if self.digest_path:
yield self.digest_path
yield self.alias
| DefaultLayout |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 678,
"end": 789
} | class ____(APIView):
def post(self, request):
return Response(data=request.data, status=200)
| PostView |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/operators/databricks.py | {
"start": 16646,
"end": 29928
} | class ____(BaseOperator):
"""
Submits a Spark job run to Databricks using the api/2.1/jobs/runs/submit API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
There are three ways to instantiate this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DatabricksSubmitRunOperator`
:param tasks: Array of Objects(RunSubmitTaskSettings) <= 100 items.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
:param json: A JSON object containing API parameters which will be passed
directly to the ``api/2.1/jobs/runs/submit`` endpoint. The other named parameters
(i.e. ``spark_jar_task``, ``notebook_task``..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
.. seealso::
For more information about templating see :ref:`concepts:jinja-templating`.
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
:param spark_jar_task: The main class and parameters for the JAR task. Note that
the actual JAR is specified in the ``libraries``.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparkjartask
:param notebook_task: The notebook path and parameters for the notebook task.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobsnotebooktask
:param spark_python_task: The python file path and parameters to run the python file with.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparkpythontask
:param spark_submit_task: Parameters needed to run a spark-submit command.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparksubmittask
:param pipeline_task: Parameters needed to execute a Delta Live Tables pipeline task.
The provided dictionary must contain at least ``pipeline_id`` field!
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobspipelinetask
:param dbt_task: Parameters needed to execute a dbt task.
The provided dictionary must contain at least the ``commands`` field and the
``git_source`` parameter also needs to be set.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
:param new_cluster: Specs for a new cluster on which this task will be run.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified
(except when ``pipeline_task`` is used).
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobsclusterspecnewcluster
:param existing_cluster_id: ID for existing cluster on which to run this task.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified
(except when ``pipeline_task`` is used).
This field will be templated.
:param libraries: Libraries which this run will use.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#managedlibrarieslibrary
:param run_name: The run name used for this task.
By default this will be set to the Airflow ``task_id``. This ``task_id`` is a
required parameter of the superclass ``BaseOperator``.
This field will be templated.
:param idempotency_token: an optional token that can be used to guarantee the idempotency of job run
requests. If a run with the provided token already exists, the request does not create a new run but
returns the ID of the existing run instead. This token must have at most 64 characters.
:param access_control_list: optional list of dictionaries representing Access Control List (ACL) for
a given job run. Each dictionary consists of following field - specific subject (``user_name`` for
users, or ``group_name`` for groups), and ``permission_level`` for that subject. See Jobs API
documentation for more details.
:param wait_for_termination: if we should wait for termination of the job run. ``True`` by default.
:param timeout_seconds: The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
:param polling_period_seconds: Controls the rate which we poll for the result of
this run. By default the operator will poll every 30 seconds.
:param databricks_retry_limit: Amount of times retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
:param databricks_retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
:param git_source: Optional specification of a remote git repository from which
supported task types are retrieved.
:param deferrable: Run operator in the deferrable mode.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("json", "databricks_conn_id")
template_ext: Sequence[str] = (".json-tpl",)
# Databricks brand color (blue) under white text
ui_color = "#1CB1C2"
ui_fgcolor = "#fff"
operator_extra_links = (DatabricksJobRunLink(),)
def __init__(
self,
*,
json: Any | None = None,
tasks: list[object] | None = None,
spark_jar_task: dict[str, str] | None = None,
notebook_task: dict[str, str] | None = None,
spark_python_task: dict[str, str | list[str]] | None = None,
spark_submit_task: dict[str, list[str]] | None = None,
pipeline_task: dict[str, str] | None = None,
dbt_task: dict[str, str | list[str]] | None = None,
new_cluster: dict[str, object] | None = None,
existing_cluster_id: str | None = None,
libraries: list[dict[str, Any]] | None = None,
run_name: str | None = None,
timeout_seconds: int | None = None,
databricks_conn_id: str = "databricks_default",
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
databricks_retry_args: dict[Any, Any] | None = None,
do_xcom_push: bool = True,
idempotency_token: str | None = None,
access_control_list: list[dict[str, str]] | None = None,
wait_for_termination: bool = True,
git_source: dict[str, str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
"""Create a new ``DatabricksSubmitRunOperator``."""
super().__init__(**kwargs)
self.json = json or {}
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.databricks_retry_args = databricks_retry_args
self.wait_for_termination = wait_for_termination
self.deferrable = deferrable
if tasks is not None:
self.json["tasks"] = tasks
if spark_jar_task is not None:
self.json["spark_jar_task"] = spark_jar_task
if notebook_task is not None:
self.json["notebook_task"] = notebook_task
if spark_python_task is not None:
self.json["spark_python_task"] = spark_python_task
if spark_submit_task is not None:
self.json["spark_submit_task"] = spark_submit_task
if pipeline_task is not None:
self.json["pipeline_task"] = pipeline_task
if dbt_task is not None:
self.json["dbt_task"] = dbt_task
if new_cluster is not None:
self.json["new_cluster"] = new_cluster
if existing_cluster_id is not None:
self.json["existing_cluster_id"] = existing_cluster_id
if libraries is not None:
self.json["libraries"] = libraries
if run_name is not None:
self.json["run_name"] = run_name
if timeout_seconds is not None:
self.json["timeout_seconds"] = timeout_seconds
if "run_name" not in self.json:
self.json["run_name"] = run_name or kwargs["task_id"]
if idempotency_token is not None:
self.json["idempotency_token"] = idempotency_token
if access_control_list is not None:
self.json["access_control_list"] = access_control_list
if git_source is not None:
self.json["git_source"] = git_source
if "dbt_task" in self.json and "git_source" not in self.json:
raise AirflowException("git_source is required for dbt_task")
if pipeline_task is not None and "pipeline_id" in pipeline_task and "pipeline_name" in pipeline_task:
raise AirflowException("'pipeline_name' is not allowed in conjunction with 'pipeline_id'")
# This variable will be used in case our task gets killed.
self.run_id: int | None = None
self.do_xcom_push = do_xcom_push
@cached_property
def _hook(self):
return self._get_hook(caller="DatabricksSubmitRunOperator")
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=caller,
)
def execute(self, context: Context):
if (
"pipeline_task" in self.json
and self.json["pipeline_task"].get("pipeline_id") is None
and self.json["pipeline_task"].get("pipeline_name")
):
# If pipeline_id is not provided, we need to fetch it from the pipeline_name
pipeline_name = self.json["pipeline_task"]["pipeline_name"]
self.json["pipeline_task"]["pipeline_id"] = self._hook.find_pipeline_id_by_name(pipeline_name)
del self.json["pipeline_task"]["pipeline_name"]
json_normalised = normalise_json_content(self.json)
self.run_id = self._hook.submit_run(json_normalised)
if self.deferrable:
_handle_deferrable_databricks_operator_execution(self, self._hook, self.log, context)
else:
_handle_databricks_operator_execution(self, self._hook, self.log, context)
def on_kill(self):
if self.run_id:
self._hook.cancel_run(self.run_id)
self.log.info(
"Task: %s with run_id: %s was requested to be cancelled.", self.task_id, self.run_id
)
else:
self.log.error("Error: Task: %s with invalid run_id was requested to be cancelled.", self.task_id)
def execute_complete(self, context: dict | None, event: dict):
_handle_deferrable_databricks_operator_completion(event, self.log)
| DatabricksSubmitRunOperator |
python | mlflow__mlflow | dev/remove_experimental_decorators.py | {
"start": 347,
"end": 6146
} | class ____:
version: str
line_number: int
end_line_number: int
column: int
age_days: int
content: str
def get_tracked_python_files() -> list[Path]:
"""Get all tracked Python files in the repository."""
result = subprocess.check_output(["git", "ls-files", "*.py"], text=True)
return [Path(f) for f in result.strip().split("\n") if f]
def get_mlflow_release_dates() -> dict[str, datetime]:
"""Fetch MLflow release dates from PyPI API."""
with urlopen("https://pypi.org/pypi/mlflow/json") as response:
data = json.loads(response.read().decode())
release_dates: dict[str, datetime] = {}
for version, releases in data["releases"].items():
if releases: # Some versions might have empty release lists
# Get the earliest release date for this version
upload_times: list[str] = [r["upload_time"] for r in releases if "upload_time" in r]
if upload_times:
earliest_time = min(upload_times)
# Parse ISO format datetime and convert to UTC
release_date = datetime.fromisoformat(earliest_time.replace("Z", "+00:00"))
if release_date.tzinfo is None:
release_date = release_date.replace(tzinfo=timezone.utc)
release_dates[version] = release_date
return release_dates
def find_experimental_decorators(
file_path: Path, release_dates: dict[str, datetime], now: datetime
) -> list[ExperimentalDecorator]:
"""
Find all @experimental decorators in a Python file using AST and return their information
with computed age.
"""
content = file_path.read_text()
tree = ast.parse(content)
decorators: list[ExperimentalDecorator] = []
for node in ast.walk(tree):
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
continue
for decorator in node.decorator_list:
if not isinstance(decorator, ast.Call):
continue
if not (isinstance(decorator.func, ast.Name) and decorator.func.id == "experimental"):
continue
version = _extract_version_from_ast_decorator(decorator)
if not version or version not in release_dates:
continue
release_date = release_dates[version]
age_days = (now - release_date).days
decorator_info = ExperimentalDecorator(
version=version,
line_number=decorator.lineno,
end_line_number=decorator.end_lineno or decorator.lineno,
column=decorator.col_offset + 1, # 1-indexed
age_days=age_days,
content=ast.unparse(decorator),
)
decorators.append(decorator_info)
return decorators
def _extract_version_from_ast_decorator(decorator: ast.Call) -> str | None:
"""Extract version string from AST decorator node."""
for keyword in decorator.keywords:
if keyword.arg == "version" and isinstance(keyword.value, ast.Constant):
return str(keyword.value.value)
return None
def remove_decorators_from_file(
file_path: Path,
decorators_to_remove: list[ExperimentalDecorator],
dry_run: bool,
) -> list[ExperimentalDecorator]:
if not decorators_to_remove:
return []
lines = file_path.read_text().splitlines(keepends=True)
# Create a set of line numbers to remove for quick lookup (handle ranges)
lines_to_remove: set[int] = set()
for decorator in decorators_to_remove:
lines_to_remove.update(range(decorator.line_number, decorator.end_line_number + 1))
new_lines: list[str] = []
for line_num, line in enumerate(lines, 1):
if line_num not in lines_to_remove:
new_lines.append(line)
if not dry_run:
file_path.write_text("".join(new_lines))
return decorators_to_remove
def main() -> None:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Remove @experimental decorators older than 6 months"
)
parser.add_argument(
"--dry-run", action="store_true", help="Show what would be removed without making changes"
)
parser.add_argument(
"files", nargs="*", help="Python files to process (defaults to all tracked Python files)"
)
args = parser.parse_args()
release_dates = get_mlflow_release_dates()
# Calculate cutoff date (6 months ago from now)
now = datetime.now(timezone.utc)
cutoff_date = now - timedelta(days=6 * 30) # Approximate 6 months
print(f"Cutoff date: {cutoff_date.strftime('%Y-%m-%d %H:%M:%S UTC')}")
python_files = [Path(f) for f in args.files] if args.files else get_tracked_python_files()
for file_path in python_files:
if not file_path.exists():
continue
# First, find all experimental decorators in the file with computed ages
decorators = find_experimental_decorators(file_path, release_dates, now)
if not decorators:
continue
# Filter to only decorators that should be removed (older than 6 months)
old_decorators = [d for d in decorators if d.age_days > 6 * 30] # 6 months approx
if not old_decorators:
continue
# Remove old decorators
if removed := remove_decorators_from_file(file_path, old_decorators, args.dry_run):
for decorator in removed:
action = "Would remove" if args.dry_run else "Removed"
print(
f"{file_path}:{decorator.line_number}:{decorator.column}: "
f"{action} {decorator.content} (age: {decorator.age_days} days)"
)
if __name__ == "__main__":
main()
| ExperimentalDecorator |
python | wandb__wandb | wandb/vendor/pygments/lexers/tcl.py | {
"start": 450,
"end": 5398
} | class ____(RegexLexer):
"""
For Tcl source code.
.. versionadded:: 0.10
"""
keyword_cmds_re = words((
'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
'vwait', 'while'), prefix=r'\b', suffix=r'\b')
builtin_cmds_re = words((
'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl', '*.rvt']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w.-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'\}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w.:-]+)', Name.Variable),
(r'([\w.:-]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'\}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'\}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
| TclLexer |
python | langchain-ai__langchain | libs/partners/fireworks/tests/unit_tests/test_standard.py | {
"start": 284,
"end": 1361
} | class ____(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"api_key": "test_api_key",
}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
"FIREWORKS_API_BASE": "https://base.com",
},
{
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
},
{
"fireworks_api_key": "api_key",
"fireworks_api_base": "https://base.com",
},
)
def test_profile() -> None:
"""Test that model profile is loaded correctly."""
model = ChatFireworks(
model="accounts/fireworks/models/gpt-oss-20b",
api_key="test_key", # type: ignore[arg-type]
)
assert model.profile
| TestFireworksStandard |
python | conda__conda | conda/common/configuration.py | {
"start": 49915,
"end": 64278
} | class ____(metaclass=ConfigurationType):
def __init__(self, search_path=(), app_name=None, argparse_args=None, **kwargs):
# Currently, __init__ does a **full** disk reload of all files.
# A future improvement would be to cache files that are already loaded.
self.raw_data = {}
self._cache_ = {}
self._reset_callbacks = IndexedSet()
self._validation_errors = defaultdict(list)
self._set_search_path(search_path, **kwargs)
self._set_env_vars(app_name)
self._set_argparse_args(argparse_args)
@classmethod
def _set_parameter_names_and_aliases(cls):
"""Build parameter_names_and_aliases from the class's parameter loaders."""
cls.parameter_names_and_aliases = tuple(
alias_name
for p in cls._parameter_loaders.values()
for alias_name in (p._names or ())
)
@staticmethod
def _expand_search_path(
search_path: PathsType,
**kwargs,
) -> Iterable[Path]:
for search in search_path:
# use custom_expandvars instead of os.path.expandvars so additional variables can be
# passed in without mutating os.environ
if isinstance(search, Path):
path = search
else:
template = custom_expandvars(str(search), environ, **kwargs)
path = Path(template).expanduser()
if path.is_file() and (
path.name in CONDARC_FILENAMES or path.suffix in YAML_EXTENSIONS
):
yield path
elif path.is_dir():
yield from (
subpath
for subpath in sorted(path.iterdir())
if subpath.is_file() and subpath.suffix in YAML_EXTENSIONS
)
@classmethod
def _load_search_path(
cls,
search_path: Iterable[Path],
) -> Iterable[tuple[Path, dict]]:
for path in search_path:
try:
yield path, YamlRawParameter.make_raw_parameters_from_file(path)
except ConfigurationLoadError as err:
log.warning(
"Ignoring configuration file (%s) due to error:\n%s",
path,
err,
)
def _set_search_path(self, search_path: PathsType, **kwargs):
self._search_path = IndexedSet(self._expand_search_path(search_path, **kwargs))
self._set_raw_data(dict(self._load_search_path(self._search_path)))
self._reset_cache()
return self
def _set_env_vars(self, app_name=None):
self._app_name = app_name
# remove existing source so "insert" order is correct
source = EnvRawParameter.source
if source in self.raw_data:
del self.raw_data[source]
if app_name:
self.raw_data[source] = EnvRawParameter.make_raw_parameters(app_name)
self._reset_cache()
return self
def _set_argparse_args(self, argparse_args):
# the argparse_args we store internally in this class as self._argparse_args
# will be a mapping type, not a non-`dict` object like argparse_args is natively
if hasattr(argparse_args, "__dict__"):
# the argparse_args from argparse will be an object with a __dict__ attribute
# and not a mapping type like this method will turn it into
items = vars(argparse_args).items()
elif not argparse_args:
# argparse_args can be initialized as `None`
items = ()
else:
# we're calling this method with argparse_args that are a mapping type, likely
# already having been processed by this method before
items = argparse_args.items()
self._argparse_args = argparse_args = AttrDict(
{k: v for k, v in items if v is not NULL}
)
# remove existing source so "insert" order is correct
source = ArgParseRawParameter.source
if source in self.raw_data:
del self.raw_data[source]
self.raw_data[source] = ArgParseRawParameter.make_raw_parameters(argparse_args)
self._reset_cache()
return self
def _set_raw_data(self, raw_data: Mapping[Hashable, dict]):
self.raw_data.update(raw_data)
self._reset_cache()
return self
def name_for_alias(self, alias: str, ignore_private: bool = True) -> str | None:
"""
Find the canonical parameter name for a given alias.
This method searches through all configuration parameters to find the canonical
parameter name that corresponds to the given alias. It's useful for resolving
parameter aliases to their primary names in configuration contexts.
Args:
alias (str): The parameter alias to look up.
ignore_private (bool, optional): If True (default), exclude private parameters
(those starting with underscore) from the search. If False, include all
parameters regardless of privacy.
Returns:
str | None: The canonical parameter name if the alias is found, otherwise None.
Example:
>>> config = Configuration()
>>> config.name_for_alias("channel_priority")
'channel_priority'
>>> config.name_for_alias("unknown_alias")
None
"""
return next(
(
p._name
for p in self._parameter_loaders.values()
if alias in p.aliases
and (not ignore_private or not p._name.startswith("_"))
),
None,
)
def _get_parameter_loader(self, parameter_name):
"""Get parameter loader with fallback for missing parameters."""
loaders = self._parameter_loaders
if parameter_name in loaders:
return loaders[parameter_name]
# Try with underscore prefix for private parameters
private_name = "_" + parameter_name
if private_name in loaders:
return loaders[private_name]
# Last resort: search through __dict__ for any ParameterLoader
for name, param in self.__class__.__dict__.items():
if isinstance(param, ParameterLoader) and param._name == parameter_name:
return param
if isinstance(param, ParameterLoader) and parameter_name in getattr(
param, "_names", ()
):
return param
return None
def _reset_cache(self):
self._cache_ = {}
for callback in self._reset_callbacks:
callback()
return self
def register_reset_callaback(self, callback):
self._reset_callbacks.add(callback)
def check_source(self, source):
# this method ends up duplicating much of the logic of Parameter.__get__
# I haven't yet found a way to make it more DRY though
typed_values = {}
validation_errors = []
raw_parameters = self.raw_data[source]
for key in self.parameter_names:
parameter = self._get_parameter_loader(key)
if parameter is None:
continue # Skip parameters that can't be found
match, multikey_error = parameter._raw_parameters_from_single_source(
raw_parameters
)
if multikey_error:
validation_errors.append(multikey_error)
if match is not None:
loaded_parameter = parameter.type.load(key, match)
# untyped_value = loaded_parameter.value
# if untyped_value is None:
# if isinstance(parameter, SequenceLoadedParameter):
# untyped_value = ()
# elif isinstance(parameter, MapLoadedParameter):
# untyped_value = {}
try:
typed_value = loaded_parameter.typify(match.source)
except CustomValidationError as e:
validation_errors.append(e)
else:
collected_errors = loaded_parameter.collect_errors(
self, typed_value, match.source
)
if collected_errors:
validation_errors.extend(collected_errors)
else:
typed_values[match.key] = typed_value
else:
# this situation will happen if there is a multikey_error and none of the
# matched keys is the primary key
pass
return typed_values, validation_errors
def validate_all(self):
validation_errors = list(
chain.from_iterable(
self.check_source(source)[1] for source in self.raw_data
)
)
raise_errors(validation_errors)
self.validate_configuration()
@staticmethod
def _collect_validation_error(func, *args, **kwargs):
try:
func(*args, **kwargs)
except ConfigurationError as e:
return (e.errors if hasattr(e, "errors") else e,)
return ()
def validate_configuration(self):
errors = chain.from_iterable(
Configuration._collect_validation_error(getattr, self, name)
for name in self.parameter_names
)
post_errors = self.post_build_validation()
raise_errors(tuple(chain.from_iterable((errors, post_errors))))
def post_build_validation(self):
return ()
def collect_all(self) -> dict[str | Path, dict]:
typed_values = {}
validation_errors = {}
for source in self.raw_data:
typed_values[source], validation_errors[source] = self.check_source(source)
raise_errors(tuple(chain.from_iterable(validation_errors.values())))
return {k: v for k, v in typed_values.items() if v}
def describe_parameter(self, parameter_name):
# TODO, in Parameter base class, rename element_type to value_type
if parameter_name not in self.parameter_names:
parameter_name = "_" + parameter_name
parameter_loader = self._get_parameter_loader(parameter_name)
if parameter_loader is None:
raise KeyError(parameter_name)
parameter = parameter_loader.type
if not isinstance(parameter, Parameter):
raise TypeError(
f"Name '{parameter_name}' did not return a Parameter object."
)
# dedupe leading underscore from name
name = parameter_loader.name.lstrip("_")
aliases = tuple(alias for alias in parameter_loader.aliases if alias != name)
description = self.get_descriptions().get(name, "")
et = parameter._element_type
if type(et) == EnumMeta: # noqa: E721
et = [et]
if not isiterable(et):
et = [et]
if isinstance(parameter._element_type, Parameter) or isinstance(
parameter._element_type, ConfigurationObject
):
element_types = tuple(
_et.__class__.__name__.lower().replace("parameter", "") for _et in et
)
else:
element_types = tuple(_et.__name__ for _et in et)
details = {
"parameter_type": parameter.__class__.__name__.lower().replace(
"parameter", ""
),
"name": name,
"aliases": aliases,
"element_types": element_types,
"default_value": parameter.default.typify("<<describe>>"),
"description": description.replace("\n", " ").strip(),
}
if isinstance(parameter, SequenceParameter):
details["string_delimiter"] = parameter.string_delimiter
return details
def list_parameters(self, aliases: bool = False):
if aliases:
return tuple(
dict.fromkeys(
name for p in self._parameter_loaders.values() for name in p._names
)
)
return tuple(sorted(name.lstrip("_") for name in self.parameter_names))
def typify_parameter(self, parameter_name, value, source):
# return a tuple with correct parameter name and typed-value
if parameter_name not in self.parameter_names:
parameter_name = "_" + parameter_name
parameter_loader = self._get_parameter_loader(parameter_name)
if parameter_loader is None:
raise KeyError(parameter_name)
parameter = parameter_loader.type
if not isinstance(parameter, Parameter):
raise TypeError(
f"Name '{parameter_name}' did not return a Parameter object."
)
return parameter.typify(parameter_name, source, value)
def get_descriptions(self):
raise NotImplementedError()
def unique_sequence_map(*, unique_key: str):
"""
Used to validate properties on :class:`Configuration` subclasses defined as a
``SequenceParameter(MapParameter())`` where the map contains a single key that
should be regarded as unique. This decorator will handle removing duplicates and
merging to a single sequence.
"""
def inner_wrap(func):
@wraps(func)
def wrapper(*args, **kwargs):
sequence_map = func(*args, **kwargs)
new_sequence_mapping = {}
for mapping in sequence_map:
unique_key_value = mapping.get(unique_key)
if unique_key_value is None:
log.error(
f'Configuration: skipping {mapping} for "{func.__name__}"; unique key '
f'"{unique_key}" not present on mapping'
)
continue
if unique_key_value in new_sequence_mapping:
log.error(
f'Configuration: skipping {mapping} for "{func.__name__}"; value '
f'"{unique_key_value}" already present'
)
continue
new_sequence_mapping[unique_key_value] = mapping
return tuple(new_sequence_mapping.values())
return wrapper
return inner_wrap
| Configuration |
python | apache__thrift | lib/py/src/transport/THeaderTransport.py | {
"start": 1564,
"end": 2081
} | class ____(object):
ZLIB = 0x01
READ_TRANSFORMS_BY_ID = {
THeaderTransformID.ZLIB: zlib.decompress,
}
WRITE_TRANSFORMS_BY_ID = {
THeaderTransformID.ZLIB: zlib.compress,
}
def _readString(trans):
size = readVarint(trans)
if size < 0:
raise TTransportException(
TTransportException.NEGATIVE_SIZE,
"Negative length"
)
return trans.read(size)
def _writeString(trans, value):
writeVarint(trans, len(value))
trans.write(value)
| THeaderTransformID |
python | huggingface__transformers | src/transformers/models/efficientnet/modeling_efficientnet.py | {
"start": 4832,
"end": 6192
} | class ____(nn.Module):
r"""
This corresponds to the depthwise convolution phase of each block in the original implementation.
"""
def __init__(
self,
config: EfficientNetConfig,
in_dim: int,
stride: int,
kernel_size: int,
adjust_padding: bool,
):
super().__init__()
self.stride = stride
conv_pad = "valid" if self.stride == 2 else "same"
padding = correct_pad(kernel_size, adjust=adjust_padding)
self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
self.depthwise_conv = EfficientNetDepthwiseConv2d(
in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
)
self.depthwise_norm = nn.BatchNorm2d(
num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
)
self.depthwise_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
# Depthwise convolution
if self.stride == 2:
hidden_states = self.depthwise_conv_pad(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.depthwise_norm(hidden_states)
hidden_states = self.depthwise_act(hidden_states)
return hidden_states
| EfficientNetDepthwiseLayer |
python | Netflix__metaflow | metaflow/_vendor/click/core.py | {
"start": 4679,
"end": 23695
} | class ____(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
.. versionadded:: 7.1
Added the `show_default` parameter.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
the name of the script.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. Default values will also be
ignored. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
:param show_default: if True, shows defaults for all options.
Even if an option is later created with show_default=False,
this command-level setting overrides it.
"""
def __init__(
self,
command,
parent=None,
info_name=None,
obj=None,
auto_envvar_prefix=None,
default_map=None,
terminal_width=None,
max_content_width=None,
resilient_parsing=False,
allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None,
help_option_names=None,
token_normalize_func=None,
color=None,
show_default=None,
):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
#: must be never propagated to another arguments. This is used
#: to implement nested parsing.
self.protected_args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
self._meta = getattr(parent, "meta", {})
#: A dictionary (-like object) with defaults for parameters.
if (
default_map is None
and parent is not None
and parent.default_map is not None
):
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
#: strongly discouraged because it's not possibly to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ["--help"]
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures and default values
#: will be ignored. Useful for completion.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if (
parent is not None
and parent.auto_envvar_prefix is not None
and self.info_name is not None
):
auto_envvar_prefix = "{}_{}".format(
parent.auto_envvar_prefix, self.info_name.upper()
)
else:
auto_envvar_prefix = auto_envvar_prefix.upper()
if auto_envvar_prefix is not None:
auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self.show_default = show_default
self._close_callbacks = []
self._depth = 0
def __enter__(self):
self._depth += 1
push_context(self)
return self
def __exit__(self, exc_type, exc_value, tb):
self._depth -= 1
if self._depth == 0:
self.close()
pop_context()
@contextmanager
def scope(self, cleanup=True):
"""This helper method can be used with the context object to promote
it to the current thread local (see :func:`get_current_context`).
The default behavior of this is to invoke the cleanup functions which
can be disabled by setting `cleanup` to `False`. The cleanup
functions are typically used for things such as closing file handles.
If the cleanup is intended the context object can also be directly
used as a context manager.
Example usage::
with ctx.scope():
assert get_current_context() is ctx
This is equivalent::
with ctx:
assert get_current_context() is ctx
.. versionadded:: 5.0
:param cleanup: controls if the cleanup functions should be run or
not. The default is to run these functions. In
some situations the context only wants to be
temporarily pushed in which case this can be disabled.
Nested pushes automatically defer the cleanup.
"""
if not cleanup:
self._depth += 1
try:
with self as rv:
yield rv
finally:
if not cleanup:
self._depth -= 1
@property
def meta(self):
"""This is a dictionary which is shared with all the contexts
that are nested. It exists so that click utilities can store some
state here if they need to. It is however the responsibility of
that code to manage this dictionary well.
The keys are supposed to be unique dotted strings. For instance
module paths are a good choice for it. What is stored in there is
irrelevant for the operation of click. However what is important is
that code that places data here adheres to the general semantics of
the system.
Example usage::
LANG_KEY = f'{__name__}.lang'
def set_language(value):
ctx = get_current_context()
ctx.meta[LANG_KEY] = value
def get_language():
return get_current_context().meta.get(LANG_KEY, 'en_US')
.. versionadded:: 5.0
"""
return self._meta
def make_formatter(self):
"""Creates the formatter for the help and usage output."""
return HelpFormatter(
width=self.terminal_width, max_width=self.max_content_width
)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ""
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = "{} {}".format(self.parent.command_path, rv)
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
def fail(self, message):
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self):
"""Aborts the script."""
raise Abort()
def exit(self, code=0):
"""Exits the application with a given exit code."""
raise Exit(code)
def get_usage(self):
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self):
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def invoke(*args, **kwargs): # noqa: B902
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
Note that before Click 3.2 keyword arguments were not properly filled
in against the intention of this code and no context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
"""
self, callback = args[:2]
ctx = self
# It's also possible to invoke another command which might or
# might not have a callback. In that case we also fill
# in defaults and make a new context for this command.
if isinstance(callback, Command):
other_cmd = callback
callback = other_cmd.callback
ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
if callback is None:
raise TypeError(
"The given command does not have a callback that can be invoked."
)
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.get_default(ctx)
args = args[2:]
with augment_usage_errors(self):
with ctx:
return callback(*args, **kwargs)
def forward(*args, **kwargs): # noqa: B902
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
"""
self, cmd = args[:2]
# It's also possible to invoke another command which might or
# might not have a callback.
if not isinstance(cmd, Command):
raise TypeError("Callback is not a command.")
for param in self.params:
if param not in kwargs:
kwargs[param] = self.params[param]
return self.invoke(cmd, **kwargs)
| Context |
python | kamyu104__LeetCode-Solutions | Python/maximal-square.py | {
"start": 2114,
"end": 3811
} | class ____(object):
# @param {character[][]} matrix
# @return {integer}
def maximalSquare(self, matrix):
if not matrix:
return 0
H, W = 0, 1
# DP table stores (h, w) for each (i, j).
table = [[[0, 0] for j in xrange(len(matrix[0]))] \
for i in xrange(len(matrix))]
for i in reversed(xrange(len(matrix))):
for j in reversed(xrange(len(matrix[i]))):
# Find the largest h such that (i, j) to (i + h - 1, j) are feasible.
# Find the largest w such that (i, j) to (i, j + w - 1) are feasible.
if matrix[i][j] == '1':
h, w = 1, 1
if i + 1 < len(matrix):
h = table[i + 1][j][H] + 1
if j + 1 < len(matrix[i]):
w = table[i][j + 1][W] + 1
table[i][j] = [h, w]
# A table stores the length of largest square for each (i, j).
s = [[0 for j in xrange(len(matrix[0]))] \
for i in xrange(len(matrix))]
max_square_area = 0
for i in reversed(xrange(len(matrix))):
for j in reversed(xrange(len(matrix[i]))):
side = min(table[i][j][H], table[i][j][W])
if matrix[i][j] == '1':
# Get the length of largest square with bottom-left corner (i, j).
if i + 1 < len(matrix) and j + 1 < len(matrix[i + 1]):
side = min(s[i + 1][j + 1] + 1, side)
s[i][j] = side
max_square_area = max(max_square_area, side * side)
return max_square_area
| Solution3 |
python | ray-project__ray | python/ray/dashboard/modules/node/datacenter.py | {
"start": 370,
"end": 1012
} | class ____:
# {node id hex(str): node stats(dict of GetNodeStatsReply
# in node_manager.proto)}
node_stats = {}
# {node id hex(str): node physical stats(dict from reporter_agent.py)}
node_physical_stats = {}
# {actor id hex(str): actor table data(dict of ActorTableData
# in gcs.proto)}
actors = {}
# {node id hex(str): gcs node info(dict of GcsNodeInfo in gcs.proto)}
nodes = {}
# {node id hex(str): worker list}
node_workers = {}
# {node id hex(str): {actor id hex(str): actor table data}}
node_actors = {}
# {worker id(str): core worker stats}
core_worker_stats = {}
| DataSource |
python | walkccc__LeetCode | solutions/2389. Longest Subsequence With Limited Sum/2389.py | {
"start": 0,
"end": 363
} | class ____:
def answerQueries(self, nums: list[int], queries: list[int]) -> list[int]:
nums.sort()
def numOfElementsLessThan(query: int) -> int:
summ = 0
for i, num in enumerate(nums):
summ += num
if summ > query:
return i
return len(nums)
return [numOfElementsLessThan(query) for query in queries]
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 54102,
"end": 54533
} | class ____(Elemwise):
_projection_passthrough = True
_filter_passthrough = True
_parameters = ["frame", "mapper", "index", "columns", "axis"]
_defaults = {
"mapper": no_default,
"index": no_default,
"columns": no_default,
"axis": 0,
}
_keyword_only = ["mapper", "index", "columns", "axis"]
operation = M.rename_axis
_preserves_partitioning_information = True
| RenameAxis |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/checkpoint_writer.py | {
"start": 1654,
"end": 5270
} | class ____:
"""
Handles writing state dictionaries to storage.
This class is responsible for writing model state dictionaries to storage according
to the specified checkpoint layout. It supports synchronization barriers to ensure
all ranks in a distributed setting complete their checkpoint operations.
"""
def __init__(
self,
config: CheckpointWriterConfig,
rank_info: RankInfo,
barrier: Optional[Barrier] = None,
commit_hook: Optional[WriterHook] = None,
):
"""
Initialize a CheckpointWriter.
Args:
config: Configuration options for the checkpoint writer.
rank_info: Information about the current rank in a distributed setting.
barrier: Optional synchronization barrier for distributed checkpointing.
Note: The barrier should be initialized with the appropriate barrier_prefix
and timeout_secs parameters.
commit_hook: Optional hook for custom actions before and after checkpoint commits.
"""
self._config = config
self._rank_info = rank_info
self._commit_hook = commit_hook
self._barrier = barrier
def write(
self,
path: str,
state_dict: STATE_DICT,
**kwargs: dict[str, Any],
) -> Optional[Future[None]]:
"""
Writes the state_dict to storage.
Args:
path (str): The path to write the checkpoint to.
state_dict (STATE_DICT): The state_dict to write.
**kwargs: Additional keyword arguments passed to hooks.
Returns:
Optional[Future[None]]: A future for tracking the write operation, if applicable.
"""
logger.debug(
"Writing checkpoint to %s for rank %s",
path,
self._rank_info.global_rank,
)
dir_path = Path(path)
full_path = dir_path / f"checkpoint_{self._rank_info.global_rank}.pt"
os.makedirs(
os.path.dirname(full_path),
exist_ok=True,
)
torch.save(state_dict, full_path)
logger.debug("Successfully saved checkpoint file to %s", full_path)
# Execute pre-commit hook if available
commit_hook = self._commit_hook
if commit_hook is not None:
logger.debug("Executing pre-commit hook for %s", path)
commit_hook.pre_commit(path, **kwargs)
# Wait for all ranks to finish writing if barrier is available
barrier = self._barrier
if barrier is not None:
logger.info(
"Waiting for all ranks at barrier with timeout %ss",
self._config.write_barrier_timeout_secs,
)
barrier.execute_barrier()
logger.info("All ranks passed barrier")
else:
logger.info("No barrier configured, skipping synchronization")
# Execute commit hook if available
if commit_hook is not None:
logger.debug("Executing commit hook for %s", path)
commit_hook.post_commit(path, **kwargs)
logger.info(
"Successfully wrote checkpoint to %s for rank %s",
path,
self._rank_info.global_rank,
)
return None
def close(self) -> None:
"""
Close the writer and release any resources.
This is a no-op for the base CheckpointWriter but may be overridden
by subclasses that need to perform cleanup.
"""
logger.debug("Closing checkpoint writer")
| CheckpointWriter |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_param_group.py | {
"start": 4844,
"end": 39082
} | class ____:
"""This class represents a parameter group to communicate together."""
_orig_dtype: Optional[torch.dtype]
_reduce_dtype: Optional[torch.dtype]
def __init__(
self,
params: list[nn.Parameter],
modules: tuple[nn.Module, ...],
mesh_info: FSDPMeshInfo,
post_forward_mesh_info: Optional[FSDPMeshInfo],
device: torch.device,
shard_placement_fn: Optional[Callable[[nn.Parameter], Optional[Shard]]],
mp_policy: MixedPrecisionPolicy,
offload_policy: OffloadPolicy,
):
self.modules = modules # permit ref cycle because 1:1 lifetime
param_module_infos = _get_param_module_infos(params, modules)
self.fsdp_params = [
FSDPParam(
param,
module_info,
mesh_info,
post_forward_mesh_info,
device,
shard_placement_fn,
mp_policy,
offload_policy,
)
for param, module_info in zip(params, param_module_infos)
]
self.mesh_info = mesh_info
self.post_forward_mesh_info = post_forward_mesh_info
# pyrefly: ignore [read-only]
self.device = device
self.device_handle = _get_device_handle(device.type)
self.mp_policy = mp_policy
self.offload_policy = offload_policy
self._training_state = TrainingState.IDLE
# Group's sharded state always matches its parameters' sharded states
self._sharded_state = ShardedState.SHARDED
self._module_fqn: Optional[str] = None # prefixed from root module
# Only consider resetting sharded parameters once in lazy init since it
# can incur nontrivial overhead to reset them
self._reset_sharded_params: bool = False
# - Hook state
self._module_to_pre_save_state_dict_hook_handle: _ModuleToHandleDict = {}
self._module_to_pre_load_state_dict_hook_handle: _ModuleToHandleDict = {}
self._all_reduce_hook: Optional[Callable[[torch.Tensor], None]] = None
self._all_gather_comm: AllGather = DefaultAllGather()
self._all_gather_output = torch.empty(0, device=self.device)
self._reduce_scatter_comm: ReduceScatter = DefaultReduceScatter()
# Optional stream to run the user-defined all-reduce hook in
# Saved here and not in the comm. context because we allow the user to
# specify it, possibly at construction time before lazy init
self._all_reduce_hook_stream: Optional[torch.cuda.Stream] = None
# - Communication and communication/computation overlap
self.comm_ctx = FSDPCommContext()
# Group's indices in the shared post-forward order
self._post_forward_indices: list[int] = []
# Whether to reduce gradients at all (whether for FSDP or HSDP)
self.reduce_grads: bool = True
# Whether to all-reduce gradients for HSDP; only used if
# `self.reduce_grads` is true, in which case setting this to false
# means reduce-scatter but no all-reduce
self.all_reduce_grads: bool = True
# Whether to reshard parameters after backward (only useful for
# gradient accumulation)
self.reshard_after_backward: bool = True
# Optional custom factor for the gradient reduction op (e.g. to divide
# by a factor other than the world size)
self.gradient_divide_factor: Optional[float] = None
# Whether reduce-scatter and all-reduce should be issued using only
# summations, potentially with separate pre-/post-scaling.
self.force_sum_reduction_for_comms: bool = False
# `async_op` arg used for pre-forward/pre-backward unshard; can be
# overridden to only do explicit prefetching and avoid inter-stream
# fragmentation from using separate unshard streams
self.unshard_async_op: bool = False
# Whether to unshard in backward: can be overridden by the user if the
# parameters in this group are not needed for backward (e.g. embedding)
self.unshard_in_backward: bool = True
# - CUDA events for stream synchronization
# Holds the all-gather output buffer, sync objects, and metadata
self._all_gather_result: Optional[AllGatherResult] = None
# Holds the reduce-scatter/all-reduce view-out CUDA event that marks the end of
# the group's post-backward (e.g. reduce-scatter, all-reduce and div), which
# should be waited on at the end of backward
self._post_reduce_event: Optional[torch.Event] = None
# Holds the reshard-after-forward CUDA event when resharding to a
# different world size, which should be waited on in the next unshard
self._reshard_after_forward_event: Optional[torch.Event] = None
# Only for HSDP, if accumulating gradients without all-reduce, save the
# partial reduce output (only reduce-scattered but not all-reduced)
self._partial_reduce_output: Optional[torch.Tensor] = None
# Holds the all-reduce input and all-reduce event to keep it alive
# until the end of backward (critical when doing bf16 reduction with
# fp32 parameters since the all-reduce input is allocated in the RS
# stream and will have no refs to it after being upcast to fp32)
self._all_reduce_state: Optional[AllReduceState] = None
# Initialization #
def _init_mp_dtypes(self) -> None:
for fsdp_param in self.fsdp_params:
fsdp_param.init_dtype_attrs(self.mp_policy)
trainable_params: list[FSDPParam] = [
p for p in self.fsdp_params if p.sharded_param.requires_grad
]
orig_dtypes = {p.orig_dtype for p in trainable_params}
reduce_dtypes = {p.reduce_dtype for p in trainable_params}
if len(trainable_params) > 0 and len(orig_dtypes) != 1:
# Models may have no grad params
raise AssertionError(
f"FSDP expects uniform original parameter dtype but got {orig_dtypes}"
)
self._orig_dtype = next(iter(orig_dtypes)) if trainable_params else None
if len(trainable_params) > 0 and len(reduce_dtypes) != 1:
# This can be relaxed if we issue one reduce-scatter per reduce
# dtype (but we would need a way for users to specify multiple
# reduce dtypes)
raise AssertionError(
f"FSDP expects uniform reduce dtype but got {reduce_dtypes}"
)
self._reduce_dtype = next(iter(reduce_dtypes)) if trainable_params else None
def lazy_init(self):
# Lazy init should be idempotent
# Users may change or register parameters after construction time.
# For example, DoRA (https://arxiv.org/abs/2402.09353) initializes linear magnitudes based on
# other parameters (e.g. loaded from the state dict).
if not hasattr(self.comm_ctx, "device_handle"):
self.comm_ctx.device_handle = _get_device_handle(self.device.type)
if self.is_sharded and not self._reset_sharded_params:
for fsdp_param in self.fsdp_params:
fsdp_param.reset_sharded_param()
fsdp_param._init_extensions() # allow monkey patch after init
self._reset_sharded_params = True
self._validate_no_meta_params()
self._validate_cpu_offload_params()
# Initialize mixed precision attributes lazily in case the user changes
# the parameter dtypes after construction time but before forward
self._init_mp_dtypes()
self._register_state_dict_hooks()
def set_allocate_memory_from_process_group(self, enable: bool) -> None:
"""
Whether to (try to) use the ProcessGroup's allocate_tensor method for
the staging buffers for collective comms.
"""
if not isinstance(
self._all_gather_comm, (DefaultAllGather | ProcessGroupAllocAllGather)
):
raise AssertionError(
"cannot call set_allocate_memory_from_process_group() "
f"when all gather comm is custom: {self._all_gather_comm.__class__.__name__}"
)
self._all_gather_comm = (
ProcessGroupAllocAllGather(self._all_gather_process_group)
if enable
else DefaultAllGather()
)
if not isinstance(
self._reduce_scatter_comm,
(DefaultReduceScatter | ProcessGroupAllocReduceScatter),
):
raise AssertionError(
"cannot call set_allocate_memory_from_process_group() "
f"when reduce scatter comm is custom: {self._reduce_scatter_comm.__class__.__name__}"
)
self._reduce_scatter_comm = (
ProcessGroupAllocReduceScatter(self._reduce_scatter_process_group)
if enable
else DefaultReduceScatter()
)
# Runtime #
def unshard(self, async_op: bool = False):
if self._all_gather_result is not None: # already called, pending wait
return
if self.is_unsharded:
return # no-op
if (
not self.unshard_in_backward
and self._training_state == TrainingState.PRE_BACKWARD
):
return
if self._reshard_after_forward_event is not None:
# Resharded parameter data is allocated in the default stream and
# used in the all-gather streams
self._wait_all_gather_streams_on_event(self._reshard_after_forward_event)
self._reshard_after_forward_event = None
if isinstance(self.mesh_info, FSDPMeshInfo):
world_size = self._all_gather_process_group.size()
else:
world_size = 1
if world_size == 1:
# can't skip due to early return in wait_for_unshard if
# no self._all_gather_result
self._all_gather_result = AllGatherResult(
all_gather_output=self._all_gather_output,
all_gather_event=self.device_handle.Event().record(),
all_gather_work=None,
param_all_gather_input_dtypes=[],
param_all_gather_input_numels=[],
all_gather_input_split_sizes=[],
)
return
with record_function(self._with_fqn("FSDP::all_gather")):
self._all_gather_result = foreach_all_gather(
self.fsdp_params,
self._all_gather_process_group,
async_op,
*self.comm_ctx.get_all_gather_streams(async_op, self._training_state),
self.device,
self._all_gather_comm,
)
def wait_for_unshard(self):
"""
1. In forward with implicit prefetching, to overlap the current copy-out
with the next all-gather, we save a reference to the current all-gather
result to free after the next copy-out.
2. Otherwise (explicit prefetching or in backward), we free the
all-gather result immediately after the current copy-out since we can
already overlap the current copy-out with the previous reduce-scatter.
"""
if not self._all_gather_result:
return # no preceding unshard
async_op = self._all_gather_result.all_gather_work is not None
if self._training_state == TrainingState.FORWARD: # implicit prefetch
if prev_all_gather_state := self.comm_ctx.all_gather_state:
self._wait_all_gather_streams_on_event(prev_all_gather_state.event)
self.comm_ctx.all_gather_state = None # free the all-gather result
if isinstance(self.mesh_info, FSDPMeshInfo):
world_size = self._all_gather_process_group.size()
else:
world_size = 1
if world_size == 1:
# directly initialize unsharded parameters from sharded parameters
for fsdp_param in self.fsdp_params:
# Use all_gather_inputs which already handles conversion to param_dtype
# This is consistent with the world_size > 1 path
all_gather_input = fsdp_param.all_gather_inputs[0]
# Make sure the all_gather_outputs has proper storage size before using it
# First ensure we have at least one tensor in all_gather_outputs
fsdp_param.init_all_gather_outputs(
[all_gather_input.numel()],
[all_gather_input.dtype],
world_size,
self.device,
force_recreate=False,
)
tensor = fsdp_param.all_gather_outputs[0]
alloc_storage(tensor)
# find alternative way to check if tensor.is_inference
with torch.autograd._unsafe_preserve_version_counter(tensor):
tensor.copy_(all_gather_input)
else:
with record_function(self._with_fqn("FSDP::all_gather_copy_out")):
foreach_all_gather_copy_out(
self._all_gather_result,
self.fsdp_params,
self._all_gather_process_group,
)
for fsdp_param in self.fsdp_params:
fsdp_param.init_unsharded_param()
self._to_unsharded()
all_gather_copy_out_event = self.device_handle.Event()
all_gather_copy_out_event.record()
if (
not async_op
and self._training_state == TrainingState.FORWARD
and world_size > 1
):
# Defer free to allow for overlap of this copy-out with next
# all-gather collective
self.comm_ctx.all_gather_state = AllGatherState(
self._all_gather_result, all_gather_copy_out_event
)
else:
self._wait_all_gather_streams_on_event(all_gather_copy_out_event)
self._all_gather_result = None # free unless saved in `all_gather_state`
def _wait_all_gather_streams_on_event(self, event: Optional[torch.Event]):
# Calling `unshard` before lazy init means streams are not initialized
if hasattr(self.comm_ctx, "all_gather_copy_in_stream") and event is not None:
self.comm_ctx.all_gather_copy_in_stream.wait_event(event)
if hasattr(self.comm_ctx, "all_gather_stream") and event is not None:
self.comm_ctx.all_gather_stream.wait_event(event)
def reshard(self):
if self._training_state == TrainingState.FORWARD:
if not self._reshard_after_forward:
return
if self._use_post_forward_mesh:
self._to_sharded_post_forward()
self._reshard_after_forward_event = self.device_handle.Event()
if self._reshard_after_forward_event is not None:
self._reshard_after_forward_event.record()
return
self._to_sharded()
def pre_forward(
self, module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> tuple[tuple[Any, ...], dict[str, Any]]:
if not compiled_autograd_enabled():
logger.debug("%s", self._with_fqn("FSDP::pre_forward"))
with record_function(self._with_fqn("FSDP::pre_forward")):
self._training_state = TrainingState.FORWARD
self.unshard(self.unshard_async_op)
self.wait_for_unshard()
args, kwargs = self._register_post_backward_hook(args, kwargs)
return args, kwargs
def post_forward(self, module: nn.Module, input: Any, output: Any):
if not compiled_autograd_enabled():
logger.debug("%s", self._with_fqn("FSDP::post_forward"))
with record_function(self._with_fqn("FSDP::post_forward")):
if not compiled_autograd_enabled():
# for AC(fully_shard(model)), AC runs fsdp's _pre_forward
# it shouldn't change post_forward_order
if not is_bw():
self.reshard()
self._record_post_forward()
else:
self.reshard()
self._record_post_forward()
self._training_state = TrainingState.IDLE
return output
def _record_post_forward(self) -> None:
# Since a group has one pre-backward unshard for each forward call
# before the backward, we record each usage (with multiplicity)
post_forward_index = len(self.comm_ctx.post_forward_order)
self.comm_ctx.post_forward_order.append(self)
self._post_forward_indices.append(post_forward_index)
def pre_backward(self, default_prefetch: bool, *unused: Any):
if (
compiled_autograd_enabled()
and self._training_state == TrainingState.PRE_BACKWARD
):
# Traceable FSDP2 cannot trigger the param group's `post_backward` immediately after param usage;
# instead it relies on this to trigger the previously unexecuted `post_backward`.
self.post_backward()
if self._training_state == TrainingState.PRE_BACKWARD:
return
if not compiled_autograd_enabled():
logger.debug("%s", self._with_fqn("FSDP::pre_backward"))
with record_function(self._with_fqn("FSDP::pre_backward")):
self._training_state = TrainingState.PRE_BACKWARD
self.unshard(self.unshard_async_op) # no-op if prefetched
self.wait_for_unshard()
if default_prefetch and not compiled_autograd_enabled():
self._backward_prefetch()
def post_backward(self, *unused: Any):
# This method should be idempotent and safe to call even when this
# FSDP parameter group was not used in backward (should be a no-op)
if not compiled_autograd_enabled():
logger.debug("%s", self._with_fqn("FSDP::post_backward"))
self._training_state = TrainingState.POST_BACKWARD
with record_function(self._with_fqn("FSDP::post_backward_accumulate")):
for fsdp_param in self.fsdp_params:
fsdp_param.accumulate_unsharded_grad_if_needed()
with record_function(self._with_fqn("FSDP::post_backward_reshard")):
if not self.reduce_grads:
if self.reshard_after_backward:
self.reshard()
for fsdp_param in self.fsdp_params:
fsdp_param.to_accumulated_grad_if_needed()
return
# Save the autograd-computed gradients before resharding to only
# access the unsharded parameters when their data is present
fsdp_params_with_grad: list[FSDPParam] = []
unsharded_grads: list[torch.Tensor] = []
for fsdp_param in self.fsdp_params:
if not hasattr(fsdp_param, "_unsharded_param"):
continue
# May have an accumulated gradient of the reduce dtype if the
# previous backward did not reduce-scatter
if fsdp_param.unsharded_accumulated_grad is not None:
fsdp_params_with_grad.append(fsdp_param)
unsharded_grads.append(fsdp_param.unsharded_accumulated_grad_data)
fsdp_param.unsharded_accumulated_grad = None
elif fsdp_param.unsharded_param.grad is not None:
fsdp_params_with_grad.append(fsdp_param)
unsharded_grads.append(fsdp_param.unsharded_grad_data)
fsdp_param.unsharded_param.grad = None
if self.reshard_after_backward:
self.reshard()
if len(fsdp_params_with_grad) == 0:
return
with record_function(self._with_fqn("FSDP::post_backward_reduce")):
if (
self.comm_ctx.reduce_scatter_state is not None
and self.comm_ctx.reduce_scatter_state.event is not None
):
self.device_handle.current_stream().wait_event(
self.comm_ctx.reduce_scatter_state.event
)
self.comm_ctx.reduce_scatter_state = None
all_reduce_pg = (
self._all_reduce_process_group
if isinstance(self.mesh_info, DDPMeshInfo)
else None
)
all_reduce_stream: torch.cuda.Stream
if all_reduce_pg is None and self._all_reduce_hook_stream is not None:
# this means the native HSDP is not enabled,
# but user may want to have a custom HSDP setup
if self._all_reduce_hook is None:
raise AssertionError(
"all reduce hook stream is specified but hook itself is missing."
)
all_reduce_stream = self._all_reduce_hook_stream
else:
all_reduce_stream = self.comm_ctx.all_reduce_stream
self._wait_for_post_backward()
(
reduce_scatter_input,
reduce_scatter_event,
self._post_reduce_event,
all_reduce_input,
all_reduce_event,
self._partial_reduce_output,
) = foreach_reduce(
fsdp_params_with_grad,
unsharded_grads,
(
self._reduce_scatter_process_group
if isinstance(self.mesh_info, FSDPMeshInfo)
else None # pyre-fixme[6]
),
self.comm_ctx.reduce_scatter_stream,
self._reduce_scatter_comm,
self._orig_dtype,
self._reduce_dtype,
self.device,
self.gradient_divide_factor,
(
self._all_reduce_process_group
if isinstance(self.mesh_info, DDPMeshInfo)
else None
),
all_reduce_stream,
self.all_reduce_grads,
self._partial_reduce_output,
self._all_reduce_hook,
self.force_sum_reduction_for_comms,
)
self.comm_ctx.reduce_scatter_state = ReduceScatterState(
reduce_scatter_input, reduce_scatter_event
)
if all_reduce_input is not None:
if self.device.type != "cpu":
if all_reduce_event is None:
raise AssertionError(
"Expected all_reduce_event to be set for non-CPU device"
)
self._all_reduce_state = AllReduceState(
all_reduce_input, all_reduce_event
)
def finalize_backward(self):
self._wait_for_post_backward()
for fsdp_param in self.fsdp_params:
if fsdp_param.grad_offload_event is not None:
fsdp_param.grad_offload_event.synchronize()
fsdp_param.grad_offload_event = None
if self._all_gather_result is not None:
# If there was a mistargeted unshard without a corresponding wait,
# then we wait here and clear the unshard
if (event := self._all_gather_result.all_gather_event) is not None:
torch.accelerator.current_stream().wait_event(event)
work = self._all_gather_result.all_gather_work
if isinstance(work, dist.distributed_c10d.Work):
work.wait()
self._all_gather_result = None
self._post_forward_indices.clear()
def _wait_for_post_backward(self):
if self._post_reduce_event is not None:
self.device_handle.current_stream().wait_event(self._post_reduce_event)
self._post_reduce_event = None
if (
self._all_reduce_state is not None
and self._all_reduce_state.event is not None
):
self.device_handle.current_stream().wait_event(self._all_reduce_state.event)
self._all_reduce_state = None
def _backward_prefetch(self) -> None:
if self._training_state == TrainingState.PRE_BACKWARD:
if not self._post_forward_indices:
# Can be cleared if running multiple `backward`s
return
curr_index = self._post_forward_indices.pop()
if (target_index := curr_index - 1) < 0:
return
# Prefetch naively using the reverse post-forward order, which may
# have mistargeted prefetches if not all modules used in forward
# are used in this backward
# pyrefly: ignore [unbound-name]
target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index]
self._prefetch_unshard(target_fsdp_param_group, "backward")
@staticmethod
def _prefetch_unshard(
target_fsdp_param_group: "FSDPParamGroup", pass_type: str
) -> None:
if pass_type == "backward":
training_state = TrainingState.PRE_BACKWARD
elif pass_type == "forward":
training_state = TrainingState.FORWARD
else:
raise ValueError(f"Unknown pass type: {pass_type}")
target_fqn = target_fsdp_param_group._module_fqn
with (
record_function(f"FSDP::{pass_type}_prefetch for {target_fqn}"),
target_fsdp_param_group.use_training_state(training_state),
):
async_op = target_fsdp_param_group.unshard_async_op
target_fsdp_param_group.unshard(async_op)
# Utilities #
def _to_sharded(self):
if not self.is_sharded:
for fsdp_param in self.fsdp_params:
fsdp_param.to_sharded()
self._sharded_state = ShardedState.SHARDED
def _to_sharded_post_forward(self):
if not self.is_sharded_post_forward:
for fsdp_param in self.fsdp_params:
fsdp_param.to_sharded_post_forward()
self._sharded_state = ShardedState.SHARDED_POST_FORWARD
def _to_unsharded(self):
if not self.is_unsharded:
for fsdp_param in self.fsdp_params:
fsdp_param.to_unsharded()
self._sharded_state = ShardedState.UNSHARDED
@property
def is_sharded(self) -> bool:
return self._sharded_state == ShardedState.SHARDED
@property
def is_sharded_post_forward(self) -> bool:
return self._sharded_state == ShardedState.SHARDED_POST_FORWARD
@property
def is_unsharded(self) -> bool:
return self._sharded_state == ShardedState.UNSHARDED
@contextlib.contextmanager
def use_training_state(self, training_state: TrainingState):
old_training_state = self._training_state
self._training_state = training_state
try:
yield
finally:
self._training_state = old_training_state
# Hook Registration #
def _register_post_backward_hook(
self, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> tuple[tuple[Any, ...], dict[str, Any]]:
# Traceable FSDP2 relies on `root_post_backward_callback` to call each
# `FSDPParamGroup.post_backward`
if (not torch._dynamo.config.skip_fsdp_hooks) or compiled_autograd_enabled():
return args, kwargs
if not torch.is_grad_enabled():
return args, kwargs
args_list, args_spec = tree_flatten(args)
kwargs_list, kwargs_spec = tree_flatten(kwargs)
args_kwargs_list = list(args_list) + list(kwargs_list)
inp_tensor_indices: list[int] = []
inp_tensors: list[torch.Tensor] = []
for i, obj in enumerate(args_kwargs_list):
if torch.is_tensor(obj) and obj.requires_grad:
inp_tensor_indices.append(i)
inp_tensors.append(obj)
if len(inp_tensors) == 0:
return args, kwargs # no tensors that require gradients
inp_tensors = RegisterPostBackwardFunction.apply(self, *inp_tensors)
for inp_tensor_idx, inp_tensor in zip(inp_tensor_indices, inp_tensors):
args_kwargs_list[inp_tensor_idx] = inp_tensor
args_list = args_kwargs_list[: len(args_list)]
kwargs_list = args_kwargs_list[len(args_list) :]
args = tree_unflatten(args_list, args_spec)
kwargs = tree_unflatten(kwargs_list, kwargs_spec)
return args, kwargs
def _register_state_dict_hooks(self) -> None:
num_pre_save_hooks = len(self._module_to_pre_save_state_dict_hook_handle)
num_pre_load_hooks = len(self._module_to_pre_load_state_dict_hook_handle)
if num_pre_save_hooks != num_pre_load_hooks:
raise AssertionError(
f"Pre-save: {num_pre_save_hooks} pre-load: {num_pre_load_hooks}"
)
if num_pre_save_hooks > 0:
return # already registered
modules_with_fsdp_params: set[nn.Module] = {
fsdp_param._module_info.module for fsdp_param in self.fsdp_params
}
def to_sharded_hook(*args: Any, **kwargs: Any) -> None:
self._to_sharded()
for module in modules_with_fsdp_params:
self._module_to_pre_save_state_dict_hook_handle[module] = (
module.register_state_dict_pre_hook(to_sharded_hook)
)
self._module_to_pre_load_state_dict_hook_handle[module] = (
module._register_load_state_dict_pre_hook(to_sharded_hook)
)
# Properties #
@property
def _reshard_after_forward(self) -> bool:
return self.post_forward_mesh_info is not None
@property
def _use_post_forward_mesh(self) -> bool:
return (
self._reshard_after_forward
and self.mesh_info != self.post_forward_mesh_info
)
@property
def _is_hsdp(self) -> bool:
return isinstance(self.mesh_info, HSDPMeshInfo)
@property
def _all_gather_process_group(self) -> dist.ProcessGroup:
mesh_info = (
cast(FSDPMeshInfo, self.post_forward_mesh_info)
if self.is_sharded_post_forward
else self.mesh_info
)
if not isinstance(mesh_info, FSDPMeshInfo):
raise AssertionError(
f"Expected mesh_info to be FSDPMeshInfo, got {type(mesh_info)}"
)
return mesh_info.shard_process_group
@property
def _reduce_scatter_process_group(self) -> dist.ProcessGroup:
if not isinstance(self.mesh_info, FSDPMeshInfo):
raise AssertionError(
f"Expected mesh_info to be FSDPMeshInfo, got {type(self.mesh_info)}"
)
return self.mesh_info.shard_process_group
@property
def _all_reduce_process_group(self) -> dist.ProcessGroup:
if not isinstance(self.mesh_info, DDPMeshInfo):
raise AssertionError(
f"Expected mesh_info to be DDPMeshInfo or HSDPMeshInfo, got {type(self.mesh_info)}"
)
return self.mesh_info.replicate_process_group
def _with_fqn(self, label: str) -> str:
if self._module_fqn:
return f"{label} ({self._module_fqn})"
return label
def __repr__(self):
return f"FSDPParamGroup(fqn={self._module_fqn})"
def _validate_no_meta_params(self):
param_names_on_meta = [
fsdp_param._param_fqn
for fsdp_param in self.fsdp_params
if fsdp_param.sharded_param.device.type == "meta"
]
if param_names_on_meta:
raise RuntimeError(
"FSDP parameters should be materialized from meta device before training, "
f"but the following were still on meta device: {param_names_on_meta}\n"
"For example, call module.to_empty(device) to materialize to device and "
"call module.reset_parameters() on each module to initialize values."
)
def _validate_cpu_offload_params(self):
if not isinstance(self.offload_policy, CPUOffloadPolicy):
return
fsdp_params_not_on_cpu = [
fsdp_param
for fsdp_param in self.fsdp_params
if fsdp_param.sharded_param.device.type != "cpu"
]
if fsdp_params_not_on_cpu:
raise RuntimeError(
"FSDP parameters should be materialized on CPU when enabling CPU offloading. "
'For example, load a CPU state dict or call module.to_empty(device="cpu"). '
"Found following parameters on non-CPU device: "
f"{[(fsdp_param._param_fqn, fsdp_param.sharded_param.device) for fsdp_param in fsdp_params_not_on_cpu]}\n"
)
def _get_param_module_infos(
params: list[nn.Parameter], modules: tuple[nn.Module, ...]
) -> list[ParamModuleInfo]:
"""
Shared parameter: lin1.weight = lin2.weight
Shared module: mlp.lin1 = mlp.lin2
We do not remove duplicates when traversing both modules and parameters to
find shared modules' parameters and shared parameters within a module.
"""
params_set = set(params)
param_to_module_info: dict[nn.Parameter, ParamModuleInfo] = {}
for module in modules:
for _, submodule in module.named_modules(remove_duplicate=False):
for param_name, param in _named_parameters_with_duplicates(
submodule, recurse=False
):
if param in params_set:
if param not in param_to_module_info:
param_to_module_info[param] = ParamModuleInfo(
submodule, param_name
)
else:
param_to_module_info[param].shared_modules.append(submodule)
param_to_module_info[param].shared_param_names.append(
param_name
)
if len(param_to_module_info) != len(params):
raise AssertionError(f"Some parameters are not in the module tree of {modules}")
return [param_to_module_info[param] for param in params]
| FSDPParamGroup |
python | prakhar1989__Algorithms | tests/gcd_test.py | {
"start": 45,
"end": 516
} | class ____(unittest.TestCase):
def test_gcd(self):
self.assertEqual(fractions.gcd(30,50),GCD.greatest_common_divisor(30,50))
self.assertEqual(fractions.gcd(55555,123450),GCD.greatest_common_divisor(55555,123450))
self.assertEqual(fractions.gcd(-30,-50),GCD.greatest_common_divisor(-30,-50))
self.assertEqual(fractions.gcd(-1234,1234),GCD.greatest_common_divisor(-1234,1234))
if __name__ == "__main__":
unittest.main()
| TestEuclideanGCD |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 8609,
"end": 8802
} | class ____(TypedDict):
id: str
passive: NotRequired[bool]
window: SlidingWindow
limit: int
scope: Literal["organization", "project"]
namespace: str | None
| CardinalityLimit |
python | dask__dask | dask/dataframe/shuffle.py | {
"start": 298,
"end": 7795
} | class ____:
"""
If serialized, will return non-buffered partd. Otherwise returns a buffered partd
"""
def __init__(self, encode_cls=None, buffer=True, tempdir=None):
self.tempdir = tempdir or config.get("temporary_directory", None)
self.buffer = buffer
self.compression = config.get("dataframe.shuffle.compression", None)
self.encode_cls = encode_cls
if encode_cls is None:
import partd
self.encode_cls = partd.PandasBlocks
def __reduce__(self):
if self.tempdir:
return (maybe_buffered_partd, (self.encode_cls, False, self.tempdir))
else:
return (maybe_buffered_partd, (self.encode_cls, False))
def __call__(self, *args, **kwargs):
import partd
path = tempfile.mkdtemp(suffix=".partd", dir=self.tempdir)
try:
partd_compression = (
getattr(partd.compressed, self.compression)
if self.compression
else None
)
except AttributeError as e:
raise ImportError(
f"Not able to import and load {self.compression} as compression algorithm."
"Please check if the library is installed and supported by Partd."
) from e
file = partd.File(path)
partd.file.cleanup_files.append(path)
# Envelope partd file with compression, if set and available
if partd_compression:
file = partd_compression(file)
if self.buffer:
return self.encode_cls(partd.Buffer(partd.Dict(), file))
else:
return self.encode_cls(file)
########################################################
# Various convenience functions to be run by the above #
########################################################
def partitioning_index(df, npartitions, cast_dtype=None):
"""
Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
cast_dtype : dtype, optional
The dtype to cast to to avoid nullability issues
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
if cast_dtype is not None:
# Fixme: astype raises with strings in numeric columns, but raising
# here might be very noisy
df = df.astype(cast_dtype, errors="ignore")
res = hash_object_dispatch(df, index=False) % int(npartitions)
# Note: Use a signed integer since pandas is more efficient at handling
# this since there is not always a fastpath for uints
return res.astype(np.min_scalar_type(-(npartitions - 1)))
def barrier(args):
list(args)
return 0
def collect(p, part, meta, barrier_token):
"""Collect partitions from partd, yield dataframes"""
with ensure_cleanup_on_exception(p):
res = p.get(part)
return res if len(res) > 0 else meta
def set_partitions_pre(s, divisions, ascending=True, na_position="last"):
try:
if ascending:
partitions = divisions.searchsorted(s, side="right") - 1
else:
partitions = len(divisions) - divisions.searchsorted(s, side="right") - 1
except (TypeError, ValueError):
# `searchsorted` fails if either `divisions` or `s` contains nulls and strings
partitions = np.empty(len(s), dtype="int32")
not_null = s.notna()
divisions_notna = divisions[divisions.notna()]
if ascending:
partitions[not_null] = (
divisions_notna.searchsorted(s[not_null], side="right") - 1
)
else:
partitions[not_null] = (
len(divisions)
- divisions_notna.searchsorted(s[not_null], side="right")
- 1
)
partitions[(partitions < 0) | (partitions >= len(divisions) - 1)] = (
len(divisions) - 2 if ascending else 0
)
nas = s.isna()
# We could be a ndarray already (datetime dtype)
nas = getattr(nas, "values", nas)
partitions[nas] = len(divisions) - 2 if na_position == "last" else 0
return partitions
def shuffle_group_2(df, cols, ignore_index, nparts):
if not len(df):
return {}, df
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]].astype(np.int32)
else:
ind = (
hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)
).astype(np.int32)
n = ind.max() + 1
result2 = group_split_dispatch(df, ind, n, ignore_index=ignore_index)
return result2, df.iloc[:0]
def shuffle_group_get(g_head, i):
g, head = g_head
if i in g:
return g[i]
else:
return head
def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):
"""Splits dataframe into groups
The group is determined by their final partition, and which stage we are in
in the shuffle
Parameters
----------
df: DataFrame
cols: str or list
Column name(s) on which to split the dataframe. If ``cols`` is not
"_partitions", hashing will be used to determine target partition
stage: int
We shuffle dataframes with many partitions we in a few stages to avoid
a quadratic number of tasks. This number corresponds to which stage
we're in, starting from zero up to some small integer
k: int
Desired number of splits from this dataframe
npartition: int
Total number of output partitions for the full dataframe
nfinal: int
Total number of output partitions after repartitioning
Returns
-------
out: Dict[int, DataFrame]
A dictionary mapping integers in {0..k} to dataframes such that the
hash values of ``df[col]`` are well partitioned.
"""
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]]
else:
ind = hash_object_dispatch(df[cols] if cols else df, index=False)
if nfinal and nfinal != npartitions:
ind = ind % int(nfinal)
typ = np.min_scalar_type(npartitions * 2)
# Here we convert the final output index `ind` into the output index
# for the current stage.
kwargs = {} if PANDAS_GE_300 else {"copy": False}
ind = (ind % npartitions).astype(typ, **kwargs) // k**stage % k
return group_split_dispatch(df, ind, k, ignore_index=ignore_index)
@contextlib.contextmanager
def ensure_cleanup_on_exception(p):
"""Ensure a partd.File is cleaned up.
We have several tasks referring to a `partd.File` instance. We want to
ensure that the file is cleaned up if and only if there's an exception
in the tasks using the `partd.File`.
"""
try:
yield
except Exception:
# the function (e.g. shuffle_group_3) had an internal exception.
# We'll cleanup our temporary files and re-raise.
try:
p.drop()
except Exception:
logger.exception("ignoring exception in ensure_cleanup_on_exception")
raise
def drop_overlap(df, index):
return df.drop(index) if index in df.index else df
def get_overlap(df, index):
return df.loc[[index]] if index in df.index else df._constructor()
| maybe_buffered_partd |
python | tiangolo__fastapi | docs_src/extra_models/tutorial001.py | {
"start": 114,
"end": 236
} | class ____(BaseModel):
username: str
password: str
email: EmailStr
full_name: Union[str, None] = None
| UserIn |
python | python-poetry__poetry | src/poetry/console/application.py | {
"start": 3353,
"end": 22268
} | class ____(BaseApplication):
def __init__(self) -> None:
super().__init__("poetry", __version__)
self._poetry: Poetry | None = None
self._io: IO | None = None
self._disable_plugins = False
self._disable_cache = False
self._plugins_loaded = False
self._working_directory = Path.cwd()
self._project_directory: Path | None = None
dispatcher = EventDispatcher()
dispatcher.add_listener(COMMAND, self.register_command_loggers)
dispatcher.add_listener(COMMAND, self.configure_env)
dispatcher.add_listener(COMMAND, self.configure_installer_for_event)
self.set_event_dispatcher(dispatcher)
command_loader = CommandLoader({name: load_command(name) for name in COMMANDS})
self.set_command_loader(command_loader)
@property
def _default_definition(self) -> Definition:
from cleo.io.inputs.option import Option
definition = super()._default_definition
definition.add_option(
Option("--no-plugins", flag=True, description="Disables plugins.")
)
definition.add_option(
Option(
"--no-cache", flag=True, description="Disables Poetry source caches."
)
)
definition.add_option(
Option(
"--project",
"-P",
flag=False,
description=(
"Specify another path as the project root."
" All command-line arguments will be resolved relative to the current working directory."
),
)
)
definition.add_option(
Option(
"--directory",
"-C",
flag=False,
description=(
"The working directory for the Poetry command (defaults to the"
" current working directory). All command-line arguments will be"
" resolved relative to the given directory."
),
)
)
return definition
@property
def project_directory(self) -> Path:
return self._project_directory or self._working_directory
@property
def poetry(self) -> Poetry:
from poetry.factory import Factory
if self._poetry is not None:
return self._poetry
self._poetry = Factory().create_poetry(
cwd=self.project_directory,
io=self._io,
disable_plugins=self._disable_plugins,
disable_cache=self._disable_cache,
)
return self._poetry
@property
def command_loader(self) -> CommandLoader:
command_loader = self._command_loader
assert isinstance(command_loader, CommandLoader)
return command_loader
def reset_poetry(self) -> None:
self._poetry = None
def create_io(
self,
input: Input | None = None,
output: Output | None = None,
error_output: Output | None = None,
) -> IO:
io = super().create_io(input, output, error_output)
# Set our own CLI styles
formatter = io.output.formatter
formatter.set_style("c1", Style("cyan"))
formatter.set_style("c2", Style("default", options=["bold"]))
formatter.set_style("info", Style("blue"))
formatter.set_style("comment", Style("green"))
formatter.set_style("warning", Style("yellow"))
formatter.set_style("debug", Style("default", options=["dark"]))
formatter.set_style("success", Style("green"))
# Dark variants
formatter.set_style("c1_dark", Style("cyan", options=["dark"]))
formatter.set_style("c2_dark", Style("default", options=["bold", "dark"]))
formatter.set_style("success_dark", Style("green", options=["dark"]))
io.output.set_formatter(formatter)
io.error_output.set_formatter(formatter)
self._io = io
return io
def _run(self, io: IO) -> int:
# we do this here and not inside the _configure_io implementation in order
# to ensure the users are not exposed to a stack trace for providing invalid values to
# the options --directory or --project, configuring the options here allow cleo to trap and
# display the error cleanly unless the user uses verbose or debug
self._configure_global_options(io)
with directory(self._working_directory):
self._load_plugins(io)
exit_code: int = 1
try:
exit_code = super()._run(io)
except PoetryRuntimeError as e:
io.write_error_line("")
e.write(io)
io.write_error_line("")
except CleoCommandNotFoundError as e:
command = self._get_command_name(io)
if command is not None and (
message := COMMAND_NOT_FOUND_MESSAGES.get(command)
):
io.write_error_line("")
io.write_error_line(COMMAND_NOT_FOUND_PREFIX_MESSAGE)
io.write_error_line(message)
return 1
if command is not None and command in self.get_namespaces():
sub_commands = []
for key in self._commands:
if key.startswith(f"{command} "):
sub_commands.append(key)
io.write_error_line(
f"The requested command does not exist in the <c1>{command}</> namespace."
)
suggested_names = find_similar_names(command, sub_commands)
self._error_write_command_suggestions(
io, suggested_names, f"#{command}"
)
return 1
if command is not None:
suggested_names = find_similar_names(
command, list(self._commands.keys())
)
io.write_error_line(
f"The requested command <c1>{command}</> does not exist."
)
self._error_write_command_suggestions(io, suggested_names)
return 1
raise e
return exit_code
def _error_write_command_suggestions(
self, io: IO, suggested_names: list[str], doc_tag: str | None = None
) -> None:
if suggested_names:
suggestion_lines = [
f"<c1>{name.replace(' ', '</> <b>', 1)}</>: {self._commands[name].description}"
for name in suggested_names
]
suggestions = "\n ".join(["", *sorted(suggestion_lines)])
io.write_error_line(
f"\n<error>Did you mean one of these perhaps?</>{suggestions}"
)
io.write_error_line(
"\n<b>Documentation: </>"
f"<info>https://python-poetry.org/docs/cli/{doc_tag or ''}</>"
)
def _configure_global_options(self, io: IO) -> None:
"""
Configures global options for the application by setting up the relevant
directories, disabling plugins or cache, and managing the working and
project directories. This method ensures that all directories are valid
paths and handles the resolution of the project directory relative to the
working directory if necessary.
:param io: The IO instance whose input and options are being read.
:return: Nothing.
"""
self._disable_plugins = io.input.option("no-plugins")
self._disable_cache = io.input.option("no-cache")
# we use ensure_path for the directories to make sure these are valid paths
# this will raise an exception if the path is invalid
self._working_directory = ensure_path(
io.input.option("directory") or Path.cwd(), is_directory=True
)
self._project_directory = io.input.option("project")
if self._project_directory is not None:
self._project_directory = Path(self._project_directory)
self._project_directory = ensure_path(
self._project_directory
if self._project_directory.is_absolute()
else self._working_directory.joinpath(self._project_directory).resolve(
strict=False
),
is_directory=True,
)
def _sort_global_options(self, io: IO) -> None:
"""
Sorts global options of the provided IO instance according to the
definition of the available options, reordering and parsing arguments
to ensure consistency in input handling.
The function interprets the options and their corresponding values
using an argument parser, constructs a sorted list of tokens, and
recreates the input with the rearranged sequence while maintaining
compatibility with the initially provided input stream.
If using in conjunction with `_configure_run_command`, it is recommended that
it be called first in order to correctly handling cases like
`poetry run -V python -V`.
:param io: The IO instance whose input and options are being processed
and reordered.
:return: Nothing.
"""
original_input = cast("ArgvInput", io.input)
tokens: list[str] = original_input._tokens
parser = argparse.ArgumentParser(add_help=False)
for option in self.definition.options:
parser.add_argument(
f"--{option.name}",
*([f"-{option.shortcut}"] if option.shortcut else []),
action="store_true" if option.is_flag() else "store",
)
args, remaining_args = parser.parse_known_args(tokens)
tokens = []
for option in self.definition.options:
key = option.name.replace("-", "_")
value = getattr(args, key, None)
if value is not None:
if value: # is truthy
tokens.append(f"--{option.name}")
if option.accepts_value():
tokens.append(str(value))
sorted_input = ArgvInput([self._name or "", *tokens, *remaining_args])
# this is required to ensure stdin is transferred
sorted_input.set_stream(original_input.stream)
# this is required as cleo internally checks for `io.input._interactive`
# when configuring io, and cleo's test applications overrides this attribute
# explicitly causing test setups to fail
sorted_input.interactive(io.input.is_interactive())
with suppress(CleoError):
sorted_input.bind(self.definition)
io.set_input(sorted_input)
def _configure_run_command(self, io: IO) -> None:
"""
Configures the input for the "run" command to properly handle cases where the user
executes commands such as "poetry run -- <subcommand>". This involves reorganizing
input tokens to ensure correct parsing and execution of the run command.
"""
with suppress(CleoError):
io.input.bind(self.definition)
command_name = io.input.first_argument
if command_name == "run":
original_input = cast("ArgvInput", io.input)
tokens: list[str] = original_input._tokens
if "--" in tokens:
# this means the user has done the right thing and used "poetry run -- echo hello"
# in this case there is not much we need to do, we can skip the rest
return
# find the correct command index, in some cases this might not be first occurrence
# eg: poetry -C run run echo
command_index = tokens.index(command_name)
while command_index < (len(tokens) - 1):
try:
# try parsing the tokens so far
_ = ArgvInput(
[self._name or "", *tokens[: command_index + 1]],
definition=self.definition,
)
break
except CleoError:
# parsing failed, try finding the next "run" token
try:
command_index += (
tokens[command_index + 1 :].index(command_name) + 1
)
except ValueError:
command_index = len(tokens)
else:
# looks like we reached the end of the road, let cleo deal with it
return
# fetch tokens after the "run" command
tokens_without_command = tokens[command_index + 1 :]
# we create a new input for parsing the subcommand pretending
# it is poetry command
without_command = ArgvInput(
[self._name or "", *tokens_without_command], None
)
with suppress(CleoError):
# we want to bind the definition here so that cleo knows what should be
# parsed, and how
without_command.bind(self.definition)
# the first argument here is the subcommand
subcommand = without_command.first_argument
subcommand_index = (
(tokens_without_command.index(subcommand) if subcommand else 0)
+ command_index
+ 1
)
# recreate the original input reordering in the following order
# - all tokens before "run" command
# - all tokens after "run" command but before the subcommand
# - the "run" command token
# - the "--" token to normalise the form
# - all remaining tokens starting with the subcommand
run_input = ArgvInput(
[
self._name or "",
*tokens[:command_index],
*tokens[command_index + 1 : subcommand_index],
command_name,
"--",
*tokens[subcommand_index:],
]
)
run_input.set_stream(original_input.stream)
with suppress(CleoError):
run_input.bind(self.definition)
# reset the input to our constructed form
io.set_input(run_input)
def _configure_io(self, io: IO) -> None:
self._configure_run_command(io)
self._sort_global_options(io)
super()._configure_io(io)
def register_command_loggers(
self, event: Event, event_name: str, _: EventDispatcher
) -> None:
from poetry.console.logging.filters import POETRY_FILTER
from poetry.console.logging.io_formatter import IOFormatter
from poetry.console.logging.io_handler import IOHandler
assert isinstance(event, ConsoleCommandEvent)
command = event.command
if not isinstance(command, Command):
return
io = event.io
loggers = [
"poetry.packages.locker",
"poetry.packages.package",
"poetry.utils.password_manager",
]
loggers += command.loggers
handler = IOHandler(io)
handler.setFormatter(IOFormatter())
level = logging.WARNING
if io.is_debug():
level = logging.DEBUG
elif io.is_very_verbose() or io.is_verbose():
level = logging.INFO
logging.basicConfig(level=level, handlers=[handler])
# only log third-party packages when very verbose
if not io.is_very_verbose():
handler.addFilter(POETRY_FILTER)
for name in loggers:
logger = logging.getLogger(name)
_level = level
# The builders loggers are special and we can actually
# start at the INFO level.
if (
logger.name.startswith("poetry.core.masonry.builders")
and _level > logging.INFO
):
_level = logging.INFO
logger.setLevel(_level)
def configure_env(self, event: Event, event_name: str, _: EventDispatcher) -> None:
from poetry.console.commands.env_command import EnvCommand
from poetry.console.commands.self.self_command import SelfCommand
assert isinstance(event, ConsoleCommandEvent)
command = event.command
if not isinstance(command, EnvCommand) or isinstance(command, SelfCommand):
return
if command._env is not None:
return
from poetry.utils.env import EnvManager
io = event.io
poetry = command.poetry
env_manager = EnvManager(poetry, io=io)
env = env_manager.create_venv()
if env.is_venv() and io.is_verbose():
io.write_error_line(f"Using virtualenv: <comment>{env.path}</>")
command.set_env(env)
@classmethod
def configure_installer_for_event(
cls, event: Event, event_name: str, _: EventDispatcher
) -> None:
from poetry.console.commands.installer_command import InstallerCommand
assert isinstance(event, ConsoleCommandEvent)
command = event.command
if not isinstance(command, InstallerCommand):
return
# If the command already has an installer
# we skip this step
if command._installer is not None:
return
cls.configure_installer_for_command(command, event.io)
@staticmethod
def configure_installer_for_command(command: InstallerCommand, io: IO) -> None:
from poetry.installation.installer import Installer
poetry = command.poetry
installer = Installer(
io,
command.env,
poetry.package,
poetry.locker,
poetry.pool,
poetry.config,
disable_cache=poetry.disable_cache,
)
command.set_installer(installer)
def _load_plugins(self, io: IO) -> None:
if self._plugins_loaded:
return
self._disable_plugins = io.input.has_parameter_option("--no-plugins")
if not self._disable_plugins:
from poetry.plugins.application_plugin import ApplicationPlugin
from poetry.plugins.plugin_manager import PluginManager
PluginManager.add_project_plugin_path(self.project_directory)
manager = PluginManager(ApplicationPlugin.group)
manager.load_plugins()
manager.activate(self)
self._plugins_loaded = True
def main() -> int:
exit_code: int = Application().run()
return exit_code
if __name__ == "__main__":
main()
| Application |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 11610,
"end": 11673
} | class ____(_Table):
_tokenizer_class = Variable
| VariableTable |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/test_given_forms.py | {
"start": 1436,
"end": 5247
} | class ____(TestCase):
@given(from_form(CustomerForm))
def test_valid_customer(self, customer_form):
self.assertTrue(customer_form.is_valid())
@given(from_form(ManyNumericsForm))
def test_valid_numerics(self, numerics_form):
self.assertTrue(numerics_form.is_valid())
@given(from_form(ManyTimesForm))
def test_valid_times(self, times_form):
self.assertTrue(times_form.is_valid())
@given(from_form(OddFieldsForm))
def test_valid_odd_fields(self, odd_form):
self.assertTrue(odd_form.is_valid())
def test_dynamic_form(self):
for field_count in range(2, 7):
@given(from_form(DynamicForm, form_kwargs={"field_count": field_count}))
def _test(dynamic_form):
self.assertTrue(dynamic_form.is_valid())
_test()
@given(from_form(BasicFieldForm))
def test_basic_fields_form(self, basic_field_form):
self.assertTrue(basic_field_form.is_valid())
@given(from_form(TemporalFieldForm))
def test_temporal_fields_form(self, time_field_form):
self.assertTrue(time_field_form.is_valid())
@given(from_form(EmailFieldForm))
def test_email_field_form(self, email_field_form):
self.assertTrue(email_field_form.is_valid())
@given(from_form(SlugFieldForm))
def test_slug_field_form(self, slug_field_form):
self.assertTrue(slug_field_form.is_valid())
@given(from_form(URLFieldForm))
def test_url_field_form(self, url_field_form):
self.assertTrue(url_field_form.is_valid())
@given(from_form(RegexFieldForm))
def test_regex_field_form(self, regex_field_form):
self.assertTrue(regex_field_form.is_valid())
@given(from_form(UUIDFieldForm))
def test_uuid_field_form(self, uuid_field_form):
self.assertTrue(uuid_field_form.is_valid())
@given(from_form(ChoiceFieldForm))
def test_choice_fields_form(self, choice_field_form):
self.assertTrue(choice_field_form.is_valid())
@given(from_form(InternetProtocolForm))
def test_ip_fields_form(self, ip_field_form):
self.assertTrue(ip_field_form.is_valid())
@given(from_form(ManyMultiValueForm, form_kwargs={"subfield_count": 2}))
def test_many_values_in_multi_value_field(self, many_multi_value_form):
self.assertTrue(many_multi_value_form.is_valid())
@given(from_form(ManyMultiValueForm, form_kwargs={"subfield_count": 105}))
def test_excessive_values_in_multi_value_field(self, excessive_form):
self.assertTrue(excessive_form.is_valid())
@given(from_form(ShortStringForm))
def test_short_string_form(self, short_string_form):
self.assertTrue(short_string_form.is_valid())
@given(from_form(WithValidatorsForm))
def test_tight_validators_form(self, x):
self.assertTrue(1 <= x.data["_int_one_to_five"] <= 5)
self.assertTrue(1 <= x.data["_decimal_one_to_five"] <= 5)
self.assertTrue(1 <= x.data["_float_one_to_five"] <= 5)
self.assertTrue(5 <= len(x.data["_string_five_to_ten"]) <= 10)
@given(from_form(FileFieldsForm))
def test_file_fields_form(self, x):
assert x.is_valid()
# form.data is empty, and form.files has one entry: file1
self.assertFalse(x.data)
self.assertTrue(set(x.files.keys()) == {"file1"})
self.assertTrue(x.files["file1"])
@skipIf(not has_contrib_auth, "contrib.auth not installed")
@given(from_form(UsernameForm))
def test_username_form(self, username_form):
self.assertTrue(username_form.is_valid())
@skipIf(not has_contrib_auth, "contrib.auth not installed")
@given(from_form(UsernameForm))
def test_read_only_password_hash_field_form(self, password_form):
self.assertTrue(password_form.is_valid())
| TestGetsBasicForms |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 182836,
"end": 185092
} | class ____(Annotated):
_Annotated__element: ColumnElement[Any]
def __init__(self, element, values):
Annotated.__init__(self, element, values)
for attr in (
"comparator",
"_proxy_key",
"_tq_key_label",
"_tq_label",
"_non_anon_label",
"type",
):
self.__dict__.pop(attr, None)
for attr in ("name", "key", "table"):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super()._with_annotations(values)
for attr in (
"comparator",
"_proxy_key",
"_tq_key_label",
"_tq_label",
"_non_anon_label",
):
clone.__dict__.pop(attr, None)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@_memoized_property_but_not_nulltype
def type(self):
"""pull 'type' from parent and don't cache if null.
type is routinely changed on existing columns within the
mapped_column() initialization process, and "type" is also consulted
during the creation of SQL expressions. Therefore it can change after
it was already retrieved. At the same time we don't want annotated
objects having overhead when expressions are produced, so continue
to memoize, but only when we have a non-null type.
"""
return self._Annotated__element.type
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self) -> _InfoType:
if TYPE_CHECKING:
assert isinstance(self._Annotated__element, Column)
return self._Annotated__element.info
@util.memoized_property
def _anon_name_label(self) -> str:
return self._Annotated__element._anon_name_label
| AnnotatedColumnElement |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cloud/models.py | {
"start": 758,
"end": 2403
} | class ____(BaseModel):
"""
Defines options that can be defined when triggering a dbt Cloud job run.
"""
cause: str = Field(
default_factory=default_cause_factory,
description="A text description of the reason for running this job.",
)
git_sha: Optional[str] = Field(
default=None, description="The git sha to check out before running this job."
)
git_branch: Optional[str] = Field(
default=None, description="The git branch to check out before running this job."
)
schema_override: Optional[str] = Field(
default=None,
description="Override the destination schema in the configured "
"target for this job.",
)
dbt_version_override: Optional[str] = Field(
default=None, description="Override the version of dbt used to run this job."
)
threads_override: Optional[int] = Field(
default=None, description="Override the number of threads used to run this job."
)
target_name_override: Optional[str] = Field(
default=None,
description="Override the target.name context variable used when "
"running this job",
)
generate_docs_override: Optional[bool] = Field(
default=None,
description="Override whether or not this job generates docs "
"(true=yes, false=no).",
)
timeout_seconds_override: Optional[int] = Field(
default=None, description="Override the timeout in seconds for this job."
)
steps_override: Optional[List[str]] = Field(
default=None, description="Override the list of steps for this job."
)
| TriggerJobRunOptions |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 4775,
"end": 5106
} | class ____(TypedDict):
"""Output format information schema."""
# Human-readable name for the output format. It must be CamelCase
name: str
# String identifier for the output format. If non-existent or None, then a
# snake_case version of the name is used.
identifier: NotRequired[Optional[str]]
| OutputFormat |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 29048,
"end": 29704
} | class ____(UnimplementedWarning):
"""
The 4 datatypes defined in the VOTable specification and supported by
``astropy.io.votable`` are ``TABLEDATA``, ``BINARY``, ``BINARY2`` and ``FITS``.
In addition, ``astropy.io.votable`` also supports ``PARQUET`` serialization, which is
a candidate for addition to the VOTable specification.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:data>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:data>`__
"""
message_template = "Unsupported data format '{}'"
default_args = ("x",)
| W37 |
python | keras-team__keras | keras/src/optimizers/adafactor_test.py | {
"start": 152,
"end": 3791
} | class ____(testing.TestCase):
def test_config(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
self.run_class_serialization_test(optimizer)
def test_single_step_1d(self):
optimizer = Adafactor(learning_rate=0.5)
grads = np.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [-0.3693, 0.6307, 1.6307, 2.6307], rtol=1e-4, atol=1e-4
)
def test_single_step_2d(self):
optimizer = Adafactor(learning_rate=0.5)
grads = np.array([[1.0, 6.0], [7.0, 2.0]])
vars = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [[0.7007, -0.0081], [1.2492, 3.4407]], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
np.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adafactor(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adafactor(
learning_rate=0.5,
beta_2_decay=-0.65,
epsilon_1=1e-15,
epsilon_2=1e-4,
clip_threshold=0.9,
relative_step=False,
)
x = backend.Variable(np.ones([10]))
grads = np.arange(0.1, 1.1, 0.1)
first_grads = np.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55, 0.55],
[0.3031, 0.3026, 0.3025, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024, 0.3024],
[0.1671, 0.1665, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663, 0.1663],
[0.0923, 0.0916, 0.0915, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914, 0.0914],
[0.0554, 0.0548, 0.0546, 0.0546, 0.0546, 0.0546, 0.0546, 0.0545, 0.0545, 0.0545]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adafactor(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adafactor(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
| AdafactorTest |
python | ray-project__ray | python/ray/data/preprocessors/scaler.py | {
"start": 9061,
"end": 12880
} | class ____(SerializablePreprocessorBase):
r"""Scale each column by its absolute max value.
The general formula is given by
.. math::
x' = \frac{x}{\max{\vert x \vert}}
where :math:`x` is the column and :math:`x'` is the transformed column. If
:math:`\max{\vert x \vert} = 0` (i.e., the column contains all zeros), then the
column is unmodified.
.. tip::
This is the recommended way to scale sparse data. If you data isn't sparse,
you can use :class:`MinMaxScaler` or :class:`StandardScaler` instead.
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import MaxAbsScaler
>>>
>>> df = pd.DataFrame({"X1": [-6, 3], "X2": [2, -4], "X3": [0, 0]}) # noqa: E501
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>> ds.to_pandas() # doctest: +SKIP
X1 X2 X3
0 -6 2 0
1 3 -4 0
Columns are scaled separately.
>>> preprocessor = MaxAbsScaler(columns=["X1", "X2"])
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3
0 -1.0 0.5 0
1 0.5 -1.0 0
Zero-valued columns aren't scaled.
>>> preprocessor = MaxAbsScaler(columns=["X3"])
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3
0 -6 2 0.0
1 3 -4 0.0
>>> preprocessor = MaxAbsScaler(columns=["X1", "X2"], output_columns=["X1_scaled", "X2_scaled"])
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
X1 X2 X3 X1_scaled X2_scaled
0 -2 -3 1 -1.0 -1.0
1 0 -3 1 0.0 -1.0
2 2 3 1 1.0 1.0
Args:
columns: The columns to separately scale.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
"""
def __init__(self, columns: List[str], output_columns: Optional[List[str]] = None):
super().__init__()
self.columns = columns
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _fit(self, dataset: "Dataset") -> Preprocessor:
aggregates = [AbsMax(col) for col in self.columns]
self.stats_ = dataset.aggregate(*aggregates)
return self
def _transform_pandas(self, df: pd.DataFrame):
def column_abs_max_scaler(s: pd.Series):
s_abs_max = self.stats_[f"abs_max({s.name})"]
# Handle division by zero.
# All values are 0.
if s_abs_max == 0:
s_abs_max = 1
return s / s_abs_max
df[self.output_columns] = df[self.columns].transform(column_abs_max_scaler)
return df
def _get_serializable_fields(self) -> Dict[str, Any]:
return {
"columns": self.columns,
"output_columns": self.output_columns,
"_fitted": getattr(self, "_fitted", None),
}
def _set_serializable_fields(self, fields: Dict[str, Any], version: int):
# required fields
self.columns = fields["columns"]
self.output_columns = fields["output_columns"]
# optional fields
self._fitted = fields.get("_fitted")
def __repr__(self):
return f"{self.__class__.__name__}(columns={self.columns!r}, output_columns={self.output_columns!r})"
@PublicAPI(stability="alpha")
@SerializablePreprocessor(version=1, identifier="io.ray.preprocessors.robust_scaler")
| MaxAbsScaler |
python | PrefectHQ__prefect | tests/server/models/test_workers.py | {
"start": 2271,
"end": 5439
} | class ____:
async def test_creating_a_pool_creates_default_queue(self, session: AsyncSession):
result = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(name="My Test Pool"),
)
# read the default queue
queue = await models.workers.read_work_queue(
session=session, work_queue_id=result.default_queue_id
)
assert queue.name == "default"
assert queue.priority == 1
# check that it is the only queue for the pool
all_queues = await models.workers.read_work_queues(
session=session, work_pool_id=result.id
)
assert len(all_queues) == 1
assert all_queues[0].id == result.default_queue_id
async def test_cant_delete_default_queue(self, session, work_pool):
with pytest.raises(ValueError, match="(Can't delete a pool's default queue.)"):
await models.workers.delete_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
async def test_cant_delete_default_queue_even_in_db(self, session, work_pool, db):
"""Deleting the default queue is not allowed in the db, even if you bypass the model"""
with pytest.raises(sa.exc.IntegrityError):
await session.execute(
sa.delete(db.WorkQueue).where(
db.WorkQueue.id == work_pool.default_queue_id
)
)
async def test_can_rename_default_queue(self, session, work_pool):
queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
assert queue.name == "default"
assert await models.workers.update_work_queue(
session=session,
work_queue_id=work_pool.default_queue_id,
work_queue=schemas.actions.WorkQueueUpdate(name="New Name"),
)
await session.commit()
session.expunge_all()
queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
assert queue.name == "New Name"
async def test_can_reprioritize_default_queue(self, session, work_pool):
queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
assert queue.priority == 1
# create a new queue so we can reprioritize them
await models.workers.create_work_queue(
session=session,
work_pool_id=work_pool.id,
work_queue=schemas.actions.WorkQueueCreate(name="New Queue"),
)
assert await models.workers.update_work_queue(
session=session,
work_queue_id=work_pool.default_queue_id,
work_queue=schemas.actions.WorkQueueUpdate(priority=2),
)
await session.commit()
session.expunge_all()
queue = await models.workers.read_work_queue(
session=session, work_queue_id=work_pool.default_queue_id
)
assert queue.priority == 2
| TestDefaultQueues |
python | ray-project__ray | python/ray/tests/test_multi_tenancy.py | {
"start": 3292,
"end": 10371
} | class ____:
def __init__(self):
self._tmpdir = tempfile.TemporaryDirectory()
self._tmppath = os.path.join(self._tmpdir.name, "signal.txt")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._tmpdir.cleanup()
def wait(self):
while not os.path.exists(self._tmppath):
time.sleep(0.1)
def send(self):
with open(self._tmppath, "w") as f:
f.write("go!")
f.flush()
f.close()
def test_kill_idle_workers(shutdown_only):
# Avoid starting initial workers by setting num_cpus to 0.
ray.init(num_cpus=0)
assert len(get_workers()) == 0
@ray.remote(num_cpus=0)
class Actor:
pass
# Worker 1 should be alive running the actor.
a = Actor.remote()
ray.get(a.__ray_ready__.remote())
assert len(get_workers()) == 1
# NOTE(edoakes): I tried writing this test using a SignalActor instead of a file
# to coordinate the tasks, but it failed because the idle workers weren't killed.
with SignalFile() as signal:
@ray.remote(num_cpus=0)
def foo():
signal.wait()
# Worker 2 should be alive running foo.
obj1 = foo.remote()
wait_for_condition(lambda: len(get_workers()) == 2)
# Worker 3 should be alive running foo.
obj2 = foo.remote()
wait_for_condition(lambda: len(get_workers()) == 3)
# Signal the tasks to unblock and wait for them to complete.
signal.send()
ray.get([obj1, obj2])
# Worker 2 and 3 now become idle and should be killed.
wait_for_condition(lambda: len(get_workers()) == 1)
# Worker 1 should also be killed when the actor exits.
del a
wait_for_condition(lambda: len(get_workers()) == 0)
def test_worker_capping_run_many_small_tasks(shutdown_only):
ray.init(num_cpus=2)
with SignalFile() as signal:
@ray.remote(num_cpus=0.5)
def foo():
signal.wait()
# Run more tasks than `num_cpus`, but the CPU resource requirement is
# still within `num_cpus`.
obj_refs = [foo.remote() for _ in range(4)]
wait_for_condition(lambda: len(get_workers()) == 4)
# Unblock the tasks.
signal.send()
ray.get(obj_refs)
# After the tasks finish, some workers are killed to keep the total
# number of workers <= num_cpus.
wait_for_condition(lambda: len(get_workers()) == 2)
# The two remaining workers stay alive forever.
for _ in range(10):
assert len(get_workers()) == 2
def test_worker_capping_run_chained_tasks(shutdown_only):
ray.init(num_cpus=2)
with SignalFile() as signal:
@ray.remote(num_cpus=0.5)
def foo(x):
if x > 1:
return ray.get(foo.remote(x - 1)) + x
else:
signal.wait()
return x
# Run a chain of tasks which exceed `num_cpus` in amount, but the CPU
# resource requirement is still within `num_cpus`.
obj = foo.remote(4)
wait_for_condition(lambda: len(get_workers()) == 4)
# Unblock the tasks.
signal.send()
ray.get(obj)
# After finished the tasks, some workers are killed to keep the total
# number of workers <= num_cpus.
wait_for_condition(lambda: len(get_workers()) == 2)
# The two remaining workers stay alive forever.
for _ in range(10):
assert len(get_workers()) == 2
def test_worker_registration_failure_after_driver_exit(shutdown_only):
info = ray.init(num_cpus=2)
wait_for_condition(lambda: len(get_workers()) == 2)
driver_code = """
import os
import ray
import time
ray.init(address="{}")
@ray.remote
def foo():
pass
obj_refs = [foo.remote() for _ in range(1000)]
ray.get(obj_refs[0])
os._exit(0)
""".format(
info["address"]
)
# Run a driver that spawns many tasks and blocks until the first result is ready,
# so at least one worker should have registered.
try:
run_string_as_driver(driver_code)
except subprocess.CalledProcessError:
# The driver exits with non-zero status Windows due to ungraceful os._exit.
pass
# Verify that the workers spawned by the old driver go away.
wait_for_condition(lambda: len(get_workers()) <= 2)
def test_not_killing_workers_that_own_objects(shutdown_only):
idle_worker_kill_interval_ms = 10
# Set the small interval for worker capping
# so that we can easily trigger it.
ray.init(
num_cpus=0,
_system_config={
"kill_idle_workers_interval_ms": idle_worker_kill_interval_ms,
},
)
# Create a nested tasks to start 4 workers each of which owns an object.
with SignalFile() as signal:
expected_num_workers = 4
@ray.remote(num_cpus=0)
def nested(i):
# Each of these tasks owns an object so it shouldn't be killed.
if i >= expected_num_workers - 1:
signal.wait()
return [ray.put(np.ones(1 * 1024 * 1024, dtype=np.uint8))]
else:
return [ray.put(np.ones(1 * 1024 * 1024, dtype=np.uint8))] + ray.get(
nested.remote(i + 1)
)
# Wait for all the workers to start up.
outer_ref = nested.remote(0)
wait_for_condition(lambda: len(get_workers()) == expected_num_workers)
# Unblock the tasks.
signal.send()
inner_ref = ray.get(outer_ref)
# Sleep for 10x the idle worker kill interval and verify that those workers
# aren't killed because they own objects that are in scope.
time.sleep((10 * idle_worker_kill_interval_ms) / 1000.0)
assert len(get_workers()) == expected_num_workers
del inner_ref
def test_kill_idle_workers_that_are_behind_owned_workers(shutdown_only):
# When the first N idle workers own objects, and if we have N+N
# total idle workers, we should make sure other N workers are killed.
# It is because the idle workers are killed in the FIFO order.
N = 4
ray.init(
num_cpus=1,
_system_config={
"kill_idle_workers_interval_ms": 10,
"worker_lease_timeout_milliseconds": 0,
},
)
@ray.remote
def nested(i):
if i >= (N * 2) - 1:
return [ray.put(np.ones(1 * 1024 * 1024, dtype=np.uint8))]
elif i >= N:
return [ray.put(np.ones(1 * 1024 * 1024, dtype=np.uint8))] + ray.get(
nested.remote(i + 1)
)
else:
return [1] + ray.get(nested.remote(i + 1))
# The first N workers don't own objects
# and the later N workers do.
ref = ray.get(nested.remote(0))
assert len(ref) == N * 2
num_workers = len(get_workers())
assert num_workers == N * 2
# Make sure there are only N workers left after worker capping.
wait_for_condition(lambda: len(get_workers()) == N)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| SignalFile |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/mirror_sourceware_broken/package.py | {
"start": 307,
"end": 623
} | class ____(AutotoolsPackage, SourcewarePackage):
"""Simple sourceware.org package"""
homepage = "https://sourceware.org/bzip2/"
url = "https://sourceware.org/pub/bzip2/bzip2-1.0.8.tar.gz"
version("1.0.8", sha256="ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269")
| MirrorSourcewareBroken |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels33.py | {
"start": 315,
"end": 1654
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels33.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [65546112, 70217728]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [{"font": {"bold": 1, "italic": 1, "baseline": -1}}],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | celery__celery | celery/bin/worker.py | {
"start": 981,
"end": 2062
} | class ____(click.Choice):
"""Workers pool option."""
name = "pool"
def __init__(self):
"""Initialize the workers pool option with the relevant choices."""
super().__init__(concurrency.get_available_pool_names())
def convert(self, value, param, ctx):
# Pools like eventlet/gevent needs to patch libs as early
# as possible.
if isinstance(value, type) and issubclass(value, BasePool):
return value
value = super().convert(value, param, ctx)
worker_pool = ctx.obj.app.conf.worker_pool
if value == 'prefork' and worker_pool:
# If we got the default pool through the CLI
# we need to check if the worker pool was configured.
# If the worker pool was configured, we shouldn't use the default.
value = concurrency.get_implementation(worker_pool)
else:
value = concurrency.get_implementation(value)
if not value:
value = concurrency.get_implementation(worker_pool)
return value
| WorkersPool |
python | pallets__jinja | src/jinja2/async_utils.py | {
"start": 2071,
"end": 2816
} | class ____(t.Generic[V]):
def __init__(self, iterator: "t.Iterator[V]"):
self._iterator = iterator
def __aiter__(self) -> "te.Self":
return self
async def __anext__(self) -> V:
try:
return next(self._iterator)
except StopIteration as e:
raise StopAsyncIteration(e.value) from e
def auto_aiter(
iterable: "t.AsyncIterable[V] | t.Iterable[V]",
) -> "t.AsyncIterator[V]":
if hasattr(iterable, "__aiter__"):
return iterable.__aiter__()
else:
return _IteratorToAsyncIterator(iter(iterable))
async def auto_to_list(
value: "t.AsyncIterable[V] | t.Iterable[V]",
) -> list["V"]:
return [x async for x in auto_aiter(value)]
| _IteratorToAsyncIterator |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 8346,
"end": 8949
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(self, name: str, bucket_id: str, secret_key: str):
"""Airbyte Destination for Kvdb.
Documentation can be found at https://kvdb.io/docs/api/
Args:
name (str): The name of the destination.
bucket_id (str): The ID of your KVdb bucket.
secret_key (str): Your bucket Secret Key.
"""
self.bucket_id = check.str_param(bucket_id, "bucket_id")
self.secret_key = check.str_param(secret_key, "secret_key")
super().__init__("Kvdb", name)
| KvdbDestination |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 55228,
"end": 55497
} | class ____:
#: The class name of the actor.
class_name: str
#: State name to the count dict. State name is equivalent to
#: the protobuf ActorState.
state_counts: Dict[TypeActorStatus, int] = field(default_factory=dict)
@dataclass
| ActorSummaryPerClass |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 84406,
"end": 85808
} | class ____(Request):
"""
Removes a task entry from the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "remove_task"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(RemoveTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| RemoveTaskRequest |
python | falconry__falcon | tests/test_error_handlers.py | {
"start": 882,
"end": 1322
} | class ____:
def on_get(self, req, resp):
raise Exception('Plain Exception')
def on_head(self, req, resp):
raise CustomBaseException('CustomBaseException')
def on_delete(self, req, resp):
raise CustomException('CustomException')
@pytest.fixture
def client(asgi, util):
app = util.create_app(asgi)
app.add_route('/', ErroredClassResource())
return testing.TestClient(app)
| ErroredClassResource |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/baxis/_title.py | {
"start": 235,
"end": 2875
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.baxis"
_path_str = "layout.ternary.baxis.title"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.baxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.ternary.baxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.ternary.baxis.Title`
font
Sets this axis' title font.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.baxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.baxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | encode__starlette | starlette/datastructures.py | {
"start": 21709,
"end": 22448
} | class ____:
"""
An object that can be used to store arbitrary state.
Used for `request.state` and `app.state`.
"""
_state: dict[str, Any]
def __init__(self, state: dict[str, Any] | None = None):
if state is None:
state = {}
super().__setattr__("_state", state)
def __setattr__(self, key: Any, value: Any) -> None:
self._state[key] = value
def __getattr__(self, key: Any) -> Any:
try:
return self._state[key]
except KeyError:
message = "'{}' object has no attribute '{}'"
raise AttributeError(message.format(self.__class__.__name__, key))
def __delattr__(self, key: Any) -> None:
del self._state[key]
| State |
python | pdm-project__pdm | src/pdm/models/candidates.py | {
"start": 3691,
"end": 11067
} | class ____:
"""A concrete candidate that can be downloaded and installed.
A candidate comes from the PyPI index of a package, or from the requirement itself
(for file or VCS requirements). Each candidate has a name, version and several
dependencies together with package metadata.
"""
__slots__ = (
"_preferred",
"_prepared",
"_requires_python",
"_revision",
"hashes",
"installed",
"link",
"name",
"req",
"requested",
"summary",
"version",
)
def __init__(
self,
req: Requirement,
name: str | None = None,
version: str | None = None,
link: Link | None = None,
installed: im.Distribution | None = None,
):
"""
:param req: the requirement that produces this candidate.
:param name: the name of the candidate.
:param version: the version of the candidate.
:param link: the file link of the candidate.
"""
self.req = req
self.name = name or self.req.project_name
self.version = version
if link is None and not req.is_named:
link = cast("Link", req.as_file_link()) # type: ignore[attr-defined]
self.link = link
self.summary = ""
self.hashes: list[FileHash] = []
self.requested = False
self.installed: im.Distribution | None = installed
self._requires_python: str | None = None
self._prepared: PreparedCandidate | None = None
self._revision = getattr(req, "revision", None)
def identify(self) -> str:
return self.req.identify()
def copy_with(self, requirement: Requirement) -> Candidate:
can = Candidate(requirement, name=self.name, version=self.version, link=self.link, installed=self.installed)
can.summary = self.summary
can.hashes = self.hashes
can._requires_python = self._requires_python
can._prepared = self._prepared
can._revision = self._revision
if can._prepared:
can._prepared.req = requirement
return can
@property
def dep_key(self) -> tuple[str, str | None]:
"""Key for retrieving and storing dependencies from the provider.
Return a tuple of (name, version). For URL candidates, the version is None but
there will be only one for the same name so it is also unique.
"""
return (self.identify(), self.version)
@property
def prepared(self) -> PreparedCandidate | None:
return self._prepared
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Candidate):
return False
if self.req.is_named:
return self.name == other.name and self.version == other.version
return self.name == other.name and self.link == other.link
def get_revision(self) -> str:
if not self.req.is_vcs:
raise AttributeError("Non-VCS candidate doesn't have revision attribute")
if self._revision:
return self._revision
if self.req.revision: # type: ignore[attr-defined]
return self.req.revision # type: ignore[attr-defined]
return self._prepared.revision if self._prepared else "unknown"
def __repr__(self) -> str:
source = getattr(self.link, "comes_from", None)
from_source = f" from {source}" if source else ""
return f"<Candidate {self}{from_source}>"
def __str__(self) -> str:
if self.req.is_named:
return f"{self.name}@{self.version}"
assert self.link is not None
return f"{self.name}@{self.link.url_without_fragment}"
@classmethod
def from_installation_candidate(cls, candidate: Package, req: Requirement) -> Candidate:
"""Build a candidate from unearth's find result."""
return cls(
req,
name=candidate.name,
version=str(candidate.version),
link=candidate.link,
)
@property
def requires_python(self) -> str:
"""The Python version constraint of the candidate."""
if self._requires_python is not None:
return self._requires_python
if self.link:
requires_python = self.link.requires_python
if requires_python is not None:
if requires_python.isdigit():
requires_python = f">={requires_python},<{int(requires_python) + 1}"
try: # ensure the specifier is valid
PySpecSet(requires_python)
except InvalidPyVersion:
pass
else:
self._requires_python = requires_python
return self._requires_python or ""
@requires_python.setter
def requires_python(self, value: str) -> None:
try: # ensure the specifier is valid
PySpecSet(value)
except InvalidPyVersion:
return
self._requires_python = value
@no_type_check
def as_lockfile_entry(self, project_root: Path) -> dict[str, Any]:
"""Build a lockfile entry dictionary for the candidate."""
version = str(self.version)
if self.req.is_pinned:
spec = next(iter(self.req.specifier))
should_normalize_version = "+" not in spec.version
else:
should_normalize_version = True
if should_normalize_version:
try:
version = str(comparable_version(version))
except InvalidVersion as e:
raise RequirementError(f"Invalid version for {self.req.as_line()}: {e}") from None
result = {
"name": normalize_name(self.name),
"version": version,
"extras": sorted(self.req.extras or ()),
"requires_python": str(self.requires_python),
"editable": self.req.editable,
"subdirectory": getattr(self.req, "subdirectory", None),
}
if self.req.is_vcs:
result.update(
{
self.req.vcs: self.req.repo,
"ref": self.req.ref,
}
)
if not self.req.editable:
result.update(revision=self.get_revision())
elif not self.req.is_named:
if self.req.is_file_or_url and self.req.is_local:
self.req._root = project_root
result.update(path=self.req.str_path)
else:
result.update(url=self.req.url)
return {k: v for k, v in result.items() if v}
def format(self) -> str:
"""Format for output."""
return f"[req]{self.name}[/] [warning]{self.version}[/]"
def prepare(self, environment: BaseEnvironment, reporter: CandidateReporter | None = None) -> PreparedCandidate:
"""Prepare the candidate for installation."""
if self._prepared is None:
if isinstance(self.req, FileRequirement):
self.req.check_installable()
self._prepared = PreparedCandidate(self, environment, reporter=reporter or CandidateReporter())
else:
self._prepared.environment = environment
if reporter is not None:
self._prepared.reporter = reporter
return self._prepared
@dataclasses.dataclass
| Candidate |
python | pallets__click | src/click/types.py | {
"start": 39799,
"end": 39927
} | class ____(t.TypedDict, total=False):
envvars: tuple[str, ...]
default: str
range: str
required: str
| OptionHelpExtra |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/config.py | {
"start": 1319,
"end": 16143
} | class ____(TypedDict, total=False):
"""Configuration for a `Runnable`.
See the [reference docs](https://reference.langchain.com/python/langchain_core/runnables/#langchain_core.runnables.RunnableConfig)
for more details.
"""
tags: list[str]
"""Tags for this call and any sub-calls (e.g. a Chain calling an LLM).
You can use these to filter calls.
"""
metadata: dict[str, Any]
"""Metadata for this call and any sub-calls (e.g. a Chain calling an LLM).
Keys should be strings, values should be JSON-serializable.
"""
callbacks: Callbacks
"""Callbacks for this call and any sub-calls (e.g. a Chain calling an LLM).
Tags are passed to all callbacks, metadata is passed to handle*Start callbacks.
"""
run_name: str
"""Name for the tracer run for this call.
Defaults to the name of the class."""
max_concurrency: int | None
"""Maximum number of parallel calls to make.
If not provided, defaults to `ThreadPoolExecutor`'s default.
"""
recursion_limit: int
"""Maximum number of times a call can recurse.
If not provided, defaults to `25`.
"""
configurable: dict[str, Any]
"""Runtime values for attributes previously made configurable on this `Runnable`,
or sub-Runnables, through `configurable_fields` or `configurable_alternatives`.
Check `output_schema` for a description of the attributes that have been made
configurable.
"""
run_id: uuid.UUID | None
"""Unique identifier for the tracer run for this call.
If not provided, a new UUID will be generated.
"""
CONFIG_KEYS = [
"tags",
"metadata",
"callbacks",
"run_name",
"max_concurrency",
"recursion_limit",
"configurable",
"run_id",
]
COPIABLE_KEYS = [
"tags",
"metadata",
"callbacks",
"configurable",
]
DEFAULT_RECURSION_LIMIT = 25
var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
"child_runnable_config", default=None
)
# This is imported and used in langgraph, so don't break.
def _set_config_context(
config: RunnableConfig,
) -> tuple[Token[RunnableConfig | None], dict[str, Any] | None]:
"""Set the child Runnable config + tracing context.
Args:
config: The config to set.
Returns:
The token to reset the config and the previous tracing context.
"""
config_token = var_child_runnable_config.set(config)
current_context = None
if (
(callbacks := config.get("callbacks"))
and (
parent_run_id := getattr(callbacks, "parent_run_id", None)
) # Is callback manager
and (
tracer := next(
(
handler
for handler in getattr(callbacks, "handlers", [])
if isinstance(handler, LangChainTracer)
),
None,
)
)
and (run := tracer.run_map.get(str(parent_run_id)))
):
current_context = get_tracing_context()
_set_tracing_context({"parent": run})
return config_token, current_context
@contextmanager
def set_config_context(config: RunnableConfig) -> Generator[Context, None, None]:
    """Set the child Runnable config + tracing context.

    Args:
        config: The config to set.

    Yields:
        The config context.
    """
    context = copy_context()
    token, _ = context.run(_set_config_context, config)
    try:
        yield context
    finally:
        # Undo the config binding and clear the tracing context so that no
        # state leaks out of this scope.
        context.run(var_child_runnable_config.reset, token)
        cleared: dict[str, Any] = {
            "parent": None,
            "project_name": None,
            "tags": None,
            "metadata": None,
            "enabled": None,
            "client": None,
        }
        context.run(_set_tracing_context, cleared)
def ensure_config(config: RunnableConfig | None = None) -> RunnableConfig:
    """Ensure that a config is a dict with all keys present.

    Args:
        config: The config to ensure.

    Returns:
        The ensured config.
    """
    result = RunnableConfig(
        tags=[],
        metadata={},
        callbacks=None,
        recursion_limit=DEFAULT_RECURSION_LIMIT,
        configurable={},
    )
    # Inherit values previously bound by a parent Runnable via the context var.
    inherited = var_child_runnable_config.get()
    if inherited:
        result.update(
            cast(
                "RunnableConfig",
                {
                    key: value.copy() if key in COPIABLE_KEYS else value  # type: ignore[attr-defined]
                    for key, value in inherited.items()
                    if value is not None
                },
            )
        )
    if config is not None:
        # Known keys override inherited values; container values are copied.
        result.update(
            cast(
                "RunnableConfig",
                {
                    key: value.copy() if key in COPIABLE_KEYS else value  # type: ignore[attr-defined]
                    for key, value in config.items()
                    if value is not None and key in CONFIG_KEYS
                },
            )
        )
        # Unknown top-level keys are folded into the configurable mapping.
        for key, value in config.items():
            if key not in CONFIG_KEYS and value is not None:
                result["configurable"][key] = value
    # Surface primitive configurable values as metadata, skipping private
    # (dunder-prefixed) keys and secrets.
    for key, value in result.get("configurable", {}).items():
        if (
            not key.startswith("__")
            and isinstance(value, (str, int, float, bool))
            and key not in result["metadata"]
            and key != "api_key"
        ):
            result["metadata"][key] = value
    return result
def get_config_list(
    config: RunnableConfig | Sequence[RunnableConfig] | None, length: int
) -> list[RunnableConfig]:
    """Get a list of configs from a single config or a list of configs.

    It is useful for subclasses overriding batch() or abatch().

    Args:
        config: The config or list of configs.
        length: The length of the list.

    Returns:
        The list of configs.

    Raises:
        ValueError: If the length of the list is not equal to the length of the inputs.
    """
    if length < 0:
        msg = f"length must be >= 0, but got {length}"
        raise ValueError(msg)
    if isinstance(config, Sequence) and len(config) != length:
        msg = (
            f"config must be a list of the same length as inputs, "
            f"but got {len(config)} configs for {length} inputs"
        )
        raise ValueError(msg)
    if isinstance(config, Sequence):
        return list(map(ensure_config, config))
    if length > 1 and isinstance(config, dict) and config.get("run_id") is not None:
        # A single run_id cannot apply to every run in the batch: keep it for
        # the first element only and strip it from the rest.
        warnings.warn(
            "Provided run_id will be used only for the first element of the batch.",
            category=RuntimeWarning,
            stacklevel=3,
        )
        subsequent = cast(
            "RunnableConfig", {k: v for k, v in config.items() if k != "run_id"}
        )
        return [
            ensure_config(subsequent) if i else ensure_config(config)
            for i in range(length)
        ]
    return [ensure_config(config) for _ in range(length)]
def patch_config(
    config: RunnableConfig | None,
    *,
    callbacks: BaseCallbackManager | None = None,
    recursion_limit: int | None = None,
    max_concurrency: int | None = None,
    run_name: str | None = None,
    configurable: dict[str, Any] | None = None,
) -> RunnableConfig:
    """Patch a config with new values.

    Args:
        config: The config to patch.
        callbacks: The callbacks to set.
        recursion_limit: The recursion limit to set.
        max_concurrency: The max concurrency to set.
        run_name: The run name to set.
        configurable: The configurable to set.

    Returns:
        The patched config.
    """
    patched = ensure_config(config)
    if callbacks is not None:
        # Replacing callbacks invalidates run-scoped identifiers, since those
        # apply only to the run the original callbacks belonged to.
        patched["callbacks"] = callbacks
        patched.pop("run_name", None)
        patched.pop("run_id", None)
    if recursion_limit is not None:
        patched["recursion_limit"] = recursion_limit
    if max_concurrency is not None:
        patched["max_concurrency"] = max_concurrency
    if run_name is not None:
        patched["run_name"] = run_name
    if configurable is not None:
        patched["configurable"] = {**patched.get("configurable", {}), **configurable}
    return patched
def merge_configs(*configs: RunnableConfig | None) -> RunnableConfig:
    """Merge multiple configs into one.

    Dict-valued keys (metadata, configurable) are unioned with later configs
    winning on conflicts; tags are de-duplicated; callbacks are combined.

    Args:
        *configs: The configs to merge.

    Returns:
        The merged config.
    """
    base: RunnableConfig = {}
    # Even though the keys aren't literals, this is correct
    # because both dicts are the same type
    for config in (ensure_config(c) for c in configs if c is not None):
        for key in config:
            if key == "metadata":
                # Dict union: later configs overwrite earlier keys.
                base["metadata"] = {
                    **base.get("metadata", {}),
                    **(config.get("metadata") or {}),
                }
            elif key == "tags":
                # Sorted, de-duplicated union of all tags seen so far.
                base["tags"] = sorted(
                    set(base.get("tags", []) + (config.get("tags") or [])),
                )
            elif key == "configurable":
                base["configurable"] = {
                    **base.get("configurable", {}),
                    **(config.get("configurable") or {}),
                }
            elif key == "callbacks":
                base_callbacks = base.get("callbacks")
                these_callbacks = config["callbacks"]
                # callbacks can be either None, list[handler] or manager
                # so merging two callbacks values has 6 cases
                if isinstance(these_callbacks, list):
                    if base_callbacks is None:
                        base["callbacks"] = these_callbacks.copy()
                    elif isinstance(base_callbacks, list):
                        base["callbacks"] = base_callbacks + these_callbacks
                    else:
                        # base_callbacks is a manager
                        mngr = base_callbacks.copy()
                        for callback in these_callbacks:
                            mngr.add_handler(callback, inherit=True)
                        base["callbacks"] = mngr
                elif these_callbacks is not None:
                    # these_callbacks is a manager
                    if base_callbacks is None:
                        base["callbacks"] = these_callbacks.copy()
                    elif isinstance(base_callbacks, list):
                        mngr = these_callbacks.copy()
                        for callback in base_callbacks:
                            mngr.add_handler(callback, inherit=True)
                        base["callbacks"] = mngr
                    else:
                        # base_callbacks is also a manager
                        base["callbacks"] = base_callbacks.merge(these_callbacks)
            elif key == "recursion_limit":
                # Only propagate a non-default recursion limit, so an explicit
                # default in a later config doesn't mask an earlier override.
                if config["recursion_limit"] != DEFAULT_RECURSION_LIMIT:
                    base["recursion_limit"] = config["recursion_limit"]
            elif key in COPIABLE_KEYS and config[key] is not None:  # type: ignore[literal-required]
                # Shallow-copy container values so callers' configs aren't
                # mutated through the merged result.
                base[key] = config[key].copy()  # type: ignore[literal-required]
            else:
                # NOTE(review): falsy values (e.g. "" or 0) fall back to the
                # existing base value here — presumably intentional.
                base[key] = config[key] or base.get(key)  # type: ignore[literal-required]
    return base
def call_func_with_variable_args(
    func: Callable[[Input], Output]
    | Callable[[Input, RunnableConfig], Output]
    | Callable[[Input, CallbackManagerForChainRun], Output]
    | Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output],
    input: Input,
    config: RunnableConfig,
    run_manager: CallbackManagerForChainRun | None = None,
    **kwargs: Any,
) -> Output:
    """Call function that may optionally accept a run_manager and/or config.

    Args:
        func: The function to call.
        input: The input to the function.
        config: The config to pass to the function.
        run_manager: The run manager to pass to the function.
        **kwargs: The keyword arguments to pass to the function.

    Returns:
        The output of the function.
    """
    if accepts_config(func):
        # Thread child callbacks through the config when a run manager exists.
        kwargs["config"] = (
            patch_config(config, callbacks=run_manager.get_child())
            if run_manager is not None
            else config
        )
    if run_manager is not None and accepts_run_manager(func):
        kwargs["run_manager"] = run_manager
    return func(input, **kwargs)  # type: ignore[call-arg]
def acall_func_with_variable_args(
    func: Callable[[Input], Awaitable[Output]]
    | Callable[[Input, RunnableConfig], Awaitable[Output]]
    | Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]]
    | Callable[
        [Input, AsyncCallbackManagerForChainRun, RunnableConfig], Awaitable[Output]
    ],
    input: Input,
    config: RunnableConfig,
    run_manager: AsyncCallbackManagerForChainRun | None = None,
    **kwargs: Any,
) -> Awaitable[Output]:
    """Async call function that may optionally accept a run_manager and/or config.

    Args:
        func: The function to call.
        input: The input to the function.
        config: The config to pass to the function.
        run_manager: The run manager to pass to the function.
        **kwargs: The keyword arguments to pass to the function.

    Returns:
        The output of the function.
    """
    if accepts_config(func):
        # Thread child callbacks through the config when a run manager exists.
        kwargs["config"] = (
            patch_config(config, callbacks=run_manager.get_child())
            if run_manager is not None
            else config
        )
    if run_manager is not None and accepts_run_manager(func):
        kwargs["run_manager"] = run_manager
    return func(input, **kwargs)  # type: ignore[call-arg]
def get_callback_manager_for_config(config: RunnableConfig) -> CallbackManager:
    """Get a callback manager for a config.

    Args:
        config: The config.

    Returns:
        The callback manager.
    """
    callbacks = config.get("callbacks")
    tags = config.get("tags")
    metadata = config.get("metadata")
    return CallbackManager.configure(
        inheritable_callbacks=callbacks,
        inheritable_tags=tags,
        inheritable_metadata=metadata,
    )
def get_async_callback_manager_for_config(
    config: RunnableConfig,
) -> AsyncCallbackManager:
    """Get an async callback manager for a config.

    Args:
        config: The config.

    Returns:
        The async callback manager.
    """
    callbacks = config.get("callbacks")
    tags = config.get("tags")
    metadata = config.get("metadata")
    return AsyncCallbackManager.configure(
        inheritable_callbacks=callbacks,
        inheritable_tags=tags,
        inheritable_metadata=metadata,
    )
# Generic type parameters for typed callable wrappers in this module.
P = ParamSpec("P")
T = TypeVar("T")
| RunnableConfig |
python | sphinx-doc__sphinx | sphinx/domains/c/__init__.py | {
"start": 13352,
"end": 13407
} | class ____(CObject):
object_type = 'type'
| CTypeObject |
python | pytorch__pytorch | torch/cpu/__init__.py | {
"start": 2451,
"end": 3064
} | class ____:
def query(self) -> bool:
return True
def record(self, stream=None) -> None:
pass
def synchronize(self) -> None:
pass
def wait(self, stream=None) -> None:
pass
_default_cpu_stream = Stream()
_current_stream = _default_cpu_stream
def current_stream(device: torch.types.Device = None) -> Stream:
r"""Returns the currently selected :class:`Stream` for a given device.
Args:
device (torch.device or int, optional): Ignored.
N.B. This function only exists to facilitate device-agnostic code
"""
return _current_stream
| Event |
python | readthedocs__sphinx_rtd_theme | setup.py | {
"start": 591,
"end": 977
} | class ____(distutils.cmd.Command):
description = "Run Webpack dev server"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.run(
["node_modules/.bin/webpack-dev-server", "--open", "--config", "webpack.dev.js"],
check=True
)
| WebpackDevelopCommand |
python | encode__django-rest-framework | tests/test_serializer_nested.py | {
"start": 7065,
"end": 8462
} | class ____:
def setup_method(self):
class NestedSerializer(serializers.Serializer):
one = serializers.IntegerField(max_value=10)
class TestSerializer(serializers.Serializer):
nested = NestedSerializer(required=False, many=True)
self.Serializer = TestSerializer
def test_json_validate(self):
input_data = {}
serializer = self.Serializer(data=input_data)
# request is empty, therefore 'nested' should not be in serializer.data
assert serializer.is_valid()
assert 'nested' not in serializer.validated_data
input_data = {'nested': [{'one': '1'}, {'one': 2}]}
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert 'nested' in serializer.validated_data
def test_multipart_validate(self):
# leave querydict empty
input_data = QueryDict('')
serializer = self.Serializer(data=input_data)
# the querydict is empty, therefore 'nested' should not be in serializer.data
assert serializer.is_valid()
assert 'nested' not in serializer.validated_data
input_data = QueryDict('nested[0]one=1&nested[1]one=2')
serializer = self.Serializer(data=input_data)
assert serializer.is_valid()
assert 'nested' in serializer.validated_data
| TestNotRequiredNestedSerializerWithMany |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 1141,
"end": 1182
} | class ____(Class4[T_co]): ...
| Class4_Child3 |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 5052,
"end": 5106
} | class ____(RollingReduction):
how = "max"
| RollingMax |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 41141,
"end": 42442
} | class ____(Request):
"""
Delete models
:param ids: Entities to move
:type ids: Sequence[str]
"""
_service = "models"
_action = "delete_many"
_version = "2.13"
_schema = {
"definitions": {},
"force": {
"description": "Force. Required if there are tasks that use the model as an execution model, or if the model's creating task is published.\n ",
"type": "boolean",
},
"properties": {
"ids": {
"description": "Entities to move",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids: List[str], **kwargs: Any) -> None:
super(DeleteManyRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| DeleteManyRequest |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-takes-to-reach-destination-without-drowning.py | {
"start": 1394,
"end": 2970
} | class ____(object):
def minimumSeconds(self, land):
"""
:type land: List[List[str]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
lookup1 = [[0 if land[i][j] == "*" else -1 for j in xrange(len(land[0]))] for i in xrange(len(land))]
lookup2 = [[-1]*len(land[0]) for _ in xrange(len(land))]
q1 = [(i, j) for i in xrange(len(land)) for j in xrange(len(land[0])) if land[i][j] == "*"]
q2 = [next((i, j) for i in xrange(len(land)) for j in xrange(len(land[0])) if land[i][j] == "S")]
lookup2[q2[0][0]][q2[0][1]] = 0
while q1 or q2:
new_q1, new_q2 = [], []
for i, j in q1:
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
if not (0 <= ni < len(land) and 0 <= nj < len(land[0]) and land[ni][nj] != "X" and land[ni][nj] != "D" and lookup1[ni][nj] == -1):
continue
lookup1[ni][nj] = 0
new_q1.append((ni, nj))
for i, j in q2:
if land[i][j] == "D":
return lookup2[i][j]
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
if not (0 <= ni < len(land) and 0 <= nj < len(land[0]) and land[ni][nj] != "X" and lookup2[ni][nj] == lookup1[ni][nj] == -1):
continue
lookup2[ni][nj] = lookup2[i][j]+1
new_q2.append((ni, nj))
q1, q2 = new_q1, new_q2
return -1
| Solution2 |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 45679,
"end": 46034
} | class ____(
LocalGeneratorObjectVariable
):
"""
.. note::
This is only used when the function is annotated with @contextlib.contextmanager
It is a special case of a generator function as we do not allow return a context manager
from a torch.compile function.
"""
| ContextlibContextManagerLocalGeneratorObjectVariable |
python | django__django | tests/admin_inlines/admin.py | {
"start": 3955,
"end": 4054
} | class ____(forms.NumberInput):
class Media:
js = ("custom_number.js",)
| CustomNumberWidget |
python | cherrypy__cherrypy | cherrypy/test/modpy.py | {
"start": 3237,
"end": 5447
} | class ____(helper.Supervisor):
"""Server Controller for ModPython and CherryPy."""
using_apache = True
using_wsgi = False
template = None
def __str__(self):
"""Render a :class:`ModPythonSupervisor` instance as a string."""
return 'ModPython Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
"""Spawn an Apache ``mod_python`` supervisor process."""
mpconf = CONF_PATH
if not os.path.isabs(mpconf):
mpconf = os.path.join(curdir, mpconf)
with open(mpconf, 'wb') as f:
f.write(
self.template
% {
'port': self.port,
'modulename': modulename,
'host': self.host,
},
)
result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
if result:
print(result)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
loaded = False
def wsgisetup(req):
"""Start a WGSI server."""
global loaded
if not loaded:
loaded = True
options = req.get_options()
cherrypy.config.update(
{
'log.error_file': os.path.join(curdir, 'test.log'),
'environment': 'test_suite',
'server.socket_host': options['socket_host'],
},
)
modname = options['testmod']
mod = __import__(modname, globals(), locals(), [''])
mod.setup_server()
cherrypy.server.unsubscribe()
cherrypy.engine.start()
from mod_python import apache
return apache.OK
def cpmodpysetup(req):
"""Start a CherryPy server with ``mod_python``."""
global loaded
if not loaded:
loaded = True
options = req.get_options()
cherrypy.config.update(
{
'log.error_file': os.path.join(curdir, 'test.log'),
'environment': 'test_suite',
'server.socket_host': options['socket_host'],
},
)
from mod_python import apache
return apache.OK
| ModPythonSupervisor |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/line/tests.py | {
"start": 236,
"end": 693
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = LineProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"userId": "u7d47d26a6bab09b95695ff02d1a36e38",
"displayName": "\uc774\uc0c1\ud601",
"pictureUrl":
"http://dl.profile.line-cdn.net/0m055ab14d725138288331268c45ac5286a35482fb794a"
}""",
)
def get_expected_to_str(self):
return "\uc774\uc0c1\ud601"
| LineTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/sensor.py | {
"start": 27075,
"end": 56246
} | class ____(NamedTuple):
run_key: Optional[str]
error_info: Optional[SerializableErrorInfo]
run: Union[SkippedSensorRun, DagsterRun, BackfillSubmission]
def _submit_run_request(
run_id: str,
run_request: RunRequest,
workspace_process_context: IWorkspaceProcessContext,
remote_sensor: RemoteSensor,
existing_runs_by_key,
logger,
sensor_debug_crash_flags,
) -> SubmitRunRequestResult:
instance = workspace_process_context.instance
sensor_origin = remote_sensor.get_remote_origin()
target_data: TargetSnap = check.not_none(remote_sensor.get_target(run_request.job_name))
# reload the code_location on each submission, request_context derived data can become out date
# * non-threaded: if number of serial submissions is too many
# * threaded: if thread sits pending in pool too long
code_location = _get_code_location_for_sensor(workspace_process_context, remote_sensor)
job_subset_selector = JobSubsetSelector(
location_name=code_location.name,
repository_name=sensor_origin.repository_origin.repository_name,
job_name=target_data.job_name,
op_selection=target_data.op_selection,
asset_selection=run_request.asset_selection,
asset_check_selection=run_request.asset_check_keys,
)
remote_job = code_location.get_job(job_subset_selector)
run = _get_or_create_sensor_run(
logger,
instance,
code_location,
remote_sensor,
remote_job,
run_id,
run_request,
target_data,
existing_runs_by_key,
)
if isinstance(run, SkippedSensorRun):
return SubmitRunRequestResult(run_key=run_request.run_key, error_info=None, run=run)
check_for_debug_crash(sensor_debug_crash_flags, "RUN_CREATED")
error_info = None
try:
logger.info(f"Launching run for {remote_sensor.name}")
instance.submit_run(run.run_id, workspace_process_context.create_request_context())
logger.info(f"Completed launch of run {run.run_id} for {remote_sensor.name}")
except Exception:
error_info = DaemonErrorCapture.process_exception(
exc_info=sys.exc_info(),
logger=logger,
log_message=f"Run {run.run_id} created successfully but failed to launch",
)
check_for_debug_crash(sensor_debug_crash_flags, "RUN_LAUNCHED")
return SubmitRunRequestResult(run_key=run_request.run_key, error_info=error_info, run=run)
def _resume_tick(
workspace_process_context: IWorkspaceProcessContext,
context: SensorLaunchContext,
tick: InstigatorTick,
remote_sensor: RemoteSensor,
submit_threadpool_executor: Optional[ThreadPoolExecutor],
sensor_debug_crash_flags: Optional[SingleInstigatorDebugCrashFlags] = None,
):
instance = workspace_process_context.instance
if (
instance.schedule_storage
and instance.schedule_storage.supports_auto_materialize_asset_evaluations
):
evaluations = [
record.get_evaluation_with_run_ids()
for record in instance.schedule_storage.get_auto_materialize_evaluations_for_evaluation_id(
evaluation_id=tick.tick_id
)
]
else:
evaluations = []
yield from _submit_run_requests(
tick.unsubmitted_run_ids_with_requests,
evaluations,
instance=instance,
context=context,
remote_sensor=remote_sensor,
workspace_process_context=workspace_process_context,
submit_threadpool_executor=submit_threadpool_executor,
sensor_debug_crash_flags=sensor_debug_crash_flags,
)
if context.tick.tick_data.user_interrupted:
context.update_state(
TickStatus.SKIPPED,
cursor=context.tick.cursor,
skip_reason="Sensor manually stopped mid-iteration.",
)
else:
context.update_state(TickStatus.SUCCESS, cursor=context.tick.cursor)
def _get_code_location_for_sensor(
workspace_process_context: IWorkspaceProcessContext,
remote_sensor: RemoteSensor,
) -> CodeLocation:
sensor_origin = remote_sensor.get_remote_origin()
return workspace_process_context.create_request_context().get_code_location(
sensor_origin.repository_origin.code_location_origin.location_name
)
def _evaluate_sensor(
workspace_process_context: IWorkspaceProcessContext,
context: SensorLaunchContext,
remote_sensor: RemoteSensor,
state: InstigatorState,
submit_threadpool_executor: Optional[ThreadPoolExecutor],
sensor_debug_crash_flags: Optional[SingleInstigatorDebugCrashFlags] = None,
):
instance = workspace_process_context.instance
if (
remote_sensor.sensor_type == SensorType.AUTOMATION
and not instance.auto_materialize_use_sensors
):
raise DagsterInvalidInvocationError(
"Cannot evaluate an AutomationConditionSensorDefinition if the instance setting "
"`auto_materialize: use_sensors` is set to False. Update your configuration to prevent this error.",
)
context.logger.info(f"Checking for new runs for sensor: {remote_sensor.name}")
code_location = _get_code_location_for_sensor(workspace_process_context, remote_sensor)
repository_handle = remote_sensor.handle.repository_handle
instigator_data = _sensor_instigator_data(state)
sensor_runtime_data = code_location.get_sensor_execution_data(
instance,
repository_handle,
remote_sensor.name,
instigator_data.last_tick_timestamp if instigator_data else None,
instigator_data.last_run_key if instigator_data else None,
instigator_data.cursor if instigator_data else None,
context.log_key,
instigator_data.last_sensor_start_timestamp if instigator_data else None,
)
yield
# Kept for backwards compatibility with sensor log keys that were previously created in the
# sensor evaluation, rather than upfront.
#
# Note that to get sensor logs for failed sensor evaluations, we force users to update their
# Dagster version.
if sensor_runtime_data.log_key:
context.add_log_key(sensor_runtime_data.log_key)
for asset_event in sensor_runtime_data.asset_events:
instance.report_runless_asset_event(asset_event)
if sensor_runtime_data.dynamic_partitions_requests:
_handle_dynamic_partitions_requests(
sensor_runtime_data.dynamic_partitions_requests, instance, context
)
if not (
sensor_runtime_data.run_requests or sensor_runtime_data.automation_condition_evaluations
):
if sensor_runtime_data.dagster_run_reactions:
_handle_run_reactions(
sensor_runtime_data.dagster_run_reactions,
instance,
context,
sensor_runtime_data.cursor,
remote_sensor,
)
elif sensor_runtime_data.skip_message:
context.logger.info(
f"Sensor {remote_sensor.name} skipped: {sensor_runtime_data.skip_message}"
)
context.update_state(
TickStatus.SKIPPED,
skip_reason=sensor_runtime_data.skip_message,
cursor=sensor_runtime_data.cursor,
)
else:
context.logger.info(f"No run requests returned for {remote_sensor.name}, skipping")
context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor)
yield
else:
yield from _handle_run_requests_and_automation_condition_evaluations(
raw_run_requests=sensor_runtime_data.run_requests or [],
automation_condition_evaluations=sensor_runtime_data.automation_condition_evaluations,
cursor=sensor_runtime_data.cursor,
context=context,
instance=instance,
remote_sensor=remote_sensor,
workspace_process_context=workspace_process_context,
submit_threadpool_executor=submit_threadpool_executor,
sensor_debug_crash_flags=sensor_debug_crash_flags,
)
if context.tick.tick_data.user_interrupted:
context.update_state(
TickStatus.SKIPPED,
cursor=sensor_runtime_data.cursor,
skip_reason="Sensor manually stopped mid-iteration.",
)
elif context.run_count:
context.update_state(TickStatus.SUCCESS, cursor=sensor_runtime_data.cursor)
else:
context.update_state(TickStatus.SKIPPED, cursor=sensor_runtime_data.cursor)
def _handle_dynamic_partitions_requests(
dynamic_partitions_requests: Sequence[
Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]
],
instance: DagsterInstance,
context: SensorLaunchContext,
) -> None:
for request in dynamic_partitions_requests:
existent_partitions = []
nonexistent_partitions = []
for partition_key in request.partition_keys:
if instance.has_dynamic_partition(request.partitions_def_name, partition_key):
existent_partitions.append(partition_key)
else:
nonexistent_partitions.append(partition_key)
if isinstance(request, AddDynamicPartitionsRequest):
if nonexistent_partitions:
instance.add_dynamic_partitions(
request.partitions_def_name,
nonexistent_partitions,
)
context.logger.info(
"Added partition keys to dynamic partitions definition"
f" '{request.partitions_def_name}': {nonexistent_partitions}"
)
if existent_partitions:
context.logger.info(
"Skipping addition of partition keys for dynamic partitions definition"
f" '{request.partitions_def_name}' that already exist:"
f" {existent_partitions}"
)
context.add_dynamic_partitions_request_result(
DynamicPartitionsRequestResult(
request.partitions_def_name,
added_partitions=nonexistent_partitions,
deleted_partitions=None,
skipped_partitions=existent_partitions,
)
)
elif isinstance(request, DeleteDynamicPartitionsRequest):
if existent_partitions:
# TODO add a bulk delete method to the instance
for partition in existent_partitions:
instance.delete_dynamic_partition(request.partitions_def_name, partition)
context.logger.info(
"Deleted partition keys from dynamic partitions definition"
f" '{request.partitions_def_name}': {existent_partitions}"
)
if nonexistent_partitions:
context.logger.info(
"Skipping deletion of partition keys for dynamic partitions definition"
f" '{request.partitions_def_name}' that do not exist:"
f" {nonexistent_partitions}"
)
context.add_dynamic_partitions_request_result(
DynamicPartitionsRequestResult(
request.partitions_def_name,
added_partitions=None,
deleted_partitions=existent_partitions,
skipped_partitions=nonexistent_partitions,
)
)
else:
check.failed(f"Unexpected action {request.action} for dynamic partition request")
def _handle_run_reactions(
dagster_run_reactions: Sequence[DagsterRunReaction],
instance: DagsterInstance,
context: SensorLaunchContext,
cursor: Optional[str],
remote_sensor: RemoteSensor,
) -> None:
for run_reaction in dagster_run_reactions:
origin_run_id = check.not_none(run_reaction.dagster_run).run_id
if run_reaction.error:
context.logger.warning(
f"Got a reaction request for run {origin_run_id} but execution errored:"
f" {run_reaction.error}"
)
context.update_state(
TickStatus.FAILURE,
cursor=cursor,
error=run_reaction.error,
)
# Since run status sensors have side effects that we don't want to repeat,
# we still want to update the cursor, even though the tick failed
context.set_should_update_cursor_on_failure(True)
else:
# Use status from the DagsterRunReaction object if it is from a new enough
# version (0.14.4) to be set (the status on the DagsterRun object itself
# may have since changed)
status = (
run_reaction.run_status.value
if run_reaction.run_status
else check.not_none(run_reaction.dagster_run).status.value
)
# log to the original dagster run
message = (
f'Sensor "{remote_sensor.name}" acted on run status '
f"{status} of run {origin_run_id}."
)
instance.report_engine_event(message=message, dagster_run=run_reaction.dagster_run)
context.logger.info(f"Completed a reaction request for run {origin_run_id}: {message}")
context.update_state(
TickStatus.SUCCESS,
cursor=cursor,
origin_run_id=origin_run_id,
)
def _resolve_run_requests(
workspace_process_context: IWorkspaceProcessContext,
context: SensorLaunchContext,
remote_sensor: RemoteSensor,
run_ids_with_requests: Sequence[tuple[str, RunRequest]],
has_evaluations: bool,
) -> Sequence[tuple[str, RunRequest]]:
resolved_run_ids_with_requests = []
for run_id, raw_run_request in run_ids_with_requests:
run_request = raw_run_request.with_replaced_attrs(
tags=merge_dicts(
raw_run_request.tags,
DagsterRun.tags_for_tick_id(context.tick_id, has_evaluations),
)
)
if run_request.stale_assets_only:
stale_assets = resolve_stale_or_missing_assets(
workspace_process_context, # type: ignore
run_request,
remote_sensor,
)
# asset selection is empty set after filtering for stale
if len(stale_assets) == 0:
continue
else:
run_request = run_request.with_replaced_attrs(
asset_selection=stale_assets, stale_assets_only=False
)
resolved_run_ids_with_requests.append((run_id, run_request))
return resolved_run_ids_with_requests
def _handle_run_requests_and_automation_condition_evaluations(
raw_run_requests: Sequence[RunRequest],
automation_condition_evaluations: Sequence[AutomationConditionEvaluation[EntityKey]],
cursor: Optional[str],
instance: DagsterInstance,
context: SensorLaunchContext,
remote_sensor: RemoteSensor,
workspace_process_context: IWorkspaceProcessContext,
submit_threadpool_executor: Optional[ThreadPoolExecutor],
sensor_debug_crash_flags: Optional[SingleInstigatorDebugCrashFlags] = None,
):
# first, write out any evaluations without any run ids
evaluations = [
evaluation.with_run_ids(set()) for evaluation in automation_condition_evaluations
]
if (
instance.schedule_storage
and instance.schedule_storage.supports_auto_materialize_asset_evaluations
):
instance.schedule_storage.add_auto_materialize_asset_evaluations(
evaluation_id=int(context.tick_id), asset_evaluations=evaluations
)
check_for_debug_crash(sensor_debug_crash_flags, "AUTOMATION_EVALUATIONS_ADDED")
def reserved_run_id(run_request: RunRequest) -> str:
if run_request.requires_backfill_daemon():
return make_new_backfill_id()
else:
return make_new_run_id()
reserved_run_ids = [reserved_run_id(run_request) for run_request in raw_run_requests]
# update cursor while reserving the relevant work, as now if the tick fails we will still submit
# the requested runs
context.set_run_requests(
run_requests=raw_run_requests, reserved_run_ids=reserved_run_ids, cursor=cursor
)
check_for_debug_crash(sensor_debug_crash_flags, "RUN_IDS_RESERVED")
run_ids_with_run_requests = list(context.tick.reserved_run_ids_with_requests)
yield from _submit_run_requests(
run_ids_with_run_requests,
evaluations,
instance,
context,
remote_sensor,
workspace_process_context,
submit_threadpool_executor,
sensor_debug_crash_flags,
)
def _submit_run_requests(
    raw_run_ids_with_requests: Sequence[tuple[str, RunRequest]],
    automation_condition_evaluations: Sequence[AutomationConditionEvaluationWithRunIds],
    instance: DagsterInstance,
    context: SensorLaunchContext,
    remote_sensor: RemoteSensor,
    workspace_process_context: IWorkspaceProcessContext,
    submit_threadpool_executor: Optional[ThreadPoolExecutor],
    sensor_debug_crash_flags: Optional[SingleInstigatorDebugCrashFlags] = None,
):
    """Submit the tick's reserved run requests as runs or backfills.

    Yields the error info (or None) of each submitted request so the caller can
    record partial failures, then yields once more after bookkeeping completes.
    Run ids of launched runs are attached to any automation condition
    evaluations that reference the same assets/asset checks.
    """
    resolved_run_ids_with_requests = _resolve_run_requests(
        workspace_process_context,
        context,
        remote_sensor,
        raw_run_ids_with_requests,
        has_evaluations=len(automation_condition_evaluations) > 0,
    )

    # Runs that already exist for a run_key are treated as already submitted.
    existing_runs_by_key = fetch_existing_runs(
        instance, remote_sensor, [request for _, request in resolved_run_ids_with_requests]
    )

    check_after_runs_num = instance.get_tick_termination_check_interval()

    def submit_run_request(
        run_id_with_run_request: tuple[str, RunRequest],
    ) -> SubmitRunRequestResult:
        # Route each request either to the backfill daemon or the run launcher.
        run_id, run_request = run_id_with_run_request
        if run_request.requires_backfill_daemon():
            return _submit_backfill_request(run_id, run_request, instance)
        else:
            return _submit_run_request(
                run_id,
                run_request,
                workspace_process_context,
                remote_sensor,
                existing_runs_by_key,
                context.logger,
                sensor_debug_crash_flags,
            )

    if submit_threadpool_executor:
        gen_run_request_results = submit_threadpool_executor.map(
            submit_run_request, resolved_run_ids_with_requests
        )
    else:
        gen_run_request_results = map(submit_run_request, resolved_run_ids_with_requests)

    skipped_runs: list[SkippedSensorRun] = []
    evaluations_by_key = {
        evaluation.key: evaluation for evaluation in automation_condition_evaluations
    }
    updated_evaluation_keys = set()
    for i, run_request_result in enumerate(gen_run_request_results):
        yield run_request_result.error_info
        run = run_request_result.run
        if isinstance(run, SkippedSensorRun):
            skipped_runs.append(run)
            context.add_run_info(run_id=None, run_key=run_request_result.run_key)
        elif isinstance(run, BackfillSubmission):
            context.add_run_info(run_id=run.backfill_id)
        else:
            context.add_run_info(run_id=run.run_id, run_key=run_request_result.run_key)
            # Record this run id on every evaluation that targeted one of the
            # run's assets or asset checks.
            entity_keys = [*(run.asset_selection or []), *(run.asset_check_selection or [])]
            for key in entity_keys:
                if key in evaluations_by_key:
                    evaluation = evaluations_by_key[key]
                    evaluations_by_key[key] = dataclasses.replace(
                        evaluation, run_ids=evaluation.run_ids | {run.run_id}
                    )
                    updated_evaluation_keys.add(key)

        # check if the sensor is still enabled:
        if check_after_runs_num is not None and i % check_after_runs_num == 0:
            if not context.sensor_is_enabled():
                # The user has manually stopped the sensor mid-iteration. In this case we assume
                # the user has a good reason for stopping the sensor (e.g. the sensor is submitting
                # many unintentional runs) so we stop submitting runs and will mark the tick as
                # skipped so that when the sensor is turned back on we don't detect this tick as incomplete
                # and try to submit the same runs again.
                context.logger.info(
                    "Sensor has been manually stopped while submitting runs. No more runs will be submitted."
                )
                context.update_state(context.status, user_interrupted=True)
                break

    if (
        updated_evaluation_keys
        and instance.schedule_storage
        and instance.schedule_storage.supports_auto_materialize_asset_evaluations
    ):
        instance.schedule_storage.add_auto_materialize_asset_evaluations(
            evaluation_id=int(context.tick_id),
            asset_evaluations=[evaluations_by_key[key] for key in updated_evaluation_keys],
        )

    check_for_debug_crash(sensor_debug_crash_flags, "RUN_IDS_ADDED_TO_EVALUATIONS")

    if skipped_runs:
        run_keys = [skipped.run_key for skipped in skipped_runs]
        skipped_count = len(skipped_runs)
        context.logger.info(
            f"Skipping {skipped_count} {'run' if skipped_count == 1 else 'runs'} for sensor "
            f"{remote_sensor.name} already completed with run keys: {seven.json.dumps(run_keys)}"
        )

    # Final yield so callers draining the generator see the tick through to the end.
    yield
def _submit_backfill_request(
    backfill_id: str,
    run_request: RunRequest,
    instance: DagsterInstance,
) -> SubmitRunRequestResult:
    """Persist a PartitionBackfill for a run request targeting an asset graph subset.

    The backfill daemon later picks up the stored backfill; no run is launched
    here, so the result carries a BackfillSubmission rather than a DagsterRun.
    """
    instance.add_backfill(
        PartitionBackfill.from_asset_graph_subset(
            backfill_id=backfill_id,
            dynamic_partitions_store=instance,
            backfill_timestamp=get_current_timestamp(),
            asset_graph_subset=check.inst(run_request.asset_graph_subset, AssetGraphSubset),
            tags=run_request.tags or {},
            # would need to add these as params to RunRequest
            title=None,
            description=None,
            run_config=run_request.run_config,
        )
    )
    return SubmitRunRequestResult(
        run_key=None, error_info=None, run=BackfillSubmission(backfill_id=backfill_id)
    )
def is_under_min_interval(
    state: InstigatorState,
    remote_sensor: RemoteSensor,
    minimum_allowed_min_interval: Optional[int] = None,
) -> bool:
    """Return True when the sensor ticked more recently than its minimum interval allows."""
    elapsed = get_elapsed(state)
    if elapsed is None:
        # No prior tick recorded, so there is nothing to throttle against.
        return False
    threshold = remote_sensor.min_interval_seconds or 0
    if minimum_allowed_min_interval is not None:
        # The caller may enforce a floor on the effective interval.
        threshold = max(threshold, minimum_allowed_min_interval)
    # A zero/empty interval disables throttling entirely.
    return bool(threshold) and elapsed < threshold
def get_elapsed(state: InstigatorState) -> Optional[float]:
    """Return seconds since the sensor last started or completed a tick, or None.

    None is returned when there is no instigator data or when neither a tick
    start nor a tick completion timestamp has been recorded yet.
    """
    data = _sensor_instigator_data(state)
    if not data:
        return None
    last_completed = data.last_tick_timestamp or 0
    last_started = data.last_tick_start_timestamp or 0
    if not (last_completed or last_started):
        return None
    # Measure from whichever event happened most recently.
    return get_current_timestamp() - max(last_completed, last_started)
def fetch_existing_runs(
    instance: DagsterInstance,
    remote_sensor: RemoteSensor,
    run_requests: Sequence[RunRequest],
) -> dict[str, DagsterRun]:
    """Return runs previously launched by this sensor, keyed by run_key.

    Used to avoid re-submitting a request whose run_key already produced a run.
    Requests without a run_key are ignored.
    """
    run_keys = [run_request.run_key for run_request in run_requests if run_request.run_key]
    if not run_keys:
        return {}

    # fetch runs from the DB with only the run key tag
    # note: while possible to filter more at DB level with tags - it is avoided here due to observed
    # perf problems
    runs_with_run_keys: list[DagsterRun] = []
    for run_key in run_keys:
        # do serial fetching, which has better perf than a single query with an IN clause, due to
        # how the query planner does the runs/run_tags join
        runs_with_run_keys.extend(
            instance.get_runs(filters=RunsFilter(tags={RUN_KEY_TAG: run_key}))
        )

    # filter down to runs with run_key that match the sensor name and its namespace (repository)
    valid_runs: list[DagsterRun] = []
    for run in runs_with_run_keys:
        # if the run doesn't have a set origin, just match on sensor name
        if run.remote_job_origin is None and run.tags.get(SENSOR_NAME_TAG) == remote_sensor.name:
            valid_runs.append(run)
        # otherwise prevent the same named sensor across repos from effecting each other
        elif (
            run.remote_job_origin is not None
            and run.remote_job_origin.repository_origin.get_selector()
            == remote_sensor.get_remote_origin().repository_origin.get_selector()
            and run.tags.get(SENSOR_NAME_TAG) == remote_sensor.name
        ):
            valid_runs.append(run)

    existing_runs: dict[str, DagsterRun] = {}
    for run in valid_runs:
        tags = run.tags or {}
        # Guaranteed to have non-null run key because the source set of runs is `runs_with_run_keys`
        # above.
        run_key = check.not_none(tags.get(RUN_KEY_TAG))
        existing_runs[run_key] = run
    return existing_runs
def _get_or_create_sensor_run(
    logger: logging.Logger,
    instance: DagsterInstance,
    code_location: CodeLocation,
    remote_sensor: RemoteSensor,
    remote_job: RemoteJob,
    run_id: str,
    run_request: RunRequest,
    target_data: TargetSnap,
    existing_runs_by_key: dict[str, DagsterRun],
) -> Union[DagsterRun, SkippedSensorRun]:
    """Return the run for this request, creating it if it does not already exist.

    Returns a SkippedSensorRun when a run for the same run_key was already
    launched (e.g. the daemon crashed after launching but before updating the
    tick). Mutates ``existing_runs_by_key`` so later requests in the same tick
    also dedupe on run_key.
    """
    run_key = run_request.run_key
    run = (run_key and existing_runs_by_key.get(run_key)) or instance.get_run_by_id(run_id)
    if run:
        if run.status != DagsterRunStatus.NOT_STARTED:
            # A run already exists and was launched for this run key, but the daemon must have
            # crashed before the tick could be updated
            return SkippedSensorRun(run_key=run_request.run_key, existing_run=run)
        else:
            # Run row exists but was never launched; reuse it.
            logger.info(
                f"Run {run.run_id} already created with the run key "
                f"`{run_key}` for {remote_sensor.name}"
            )
            return run

    logger.info(f"Creating new run for {remote_sensor.name}")

    run = _create_sensor_run(
        instance, code_location, remote_sensor, remote_job, run_id, run_request, target_data
    )

    # Make sure that runs from the same tick are also unique by run key
    if run_key:
        existing_runs_by_key[run_key] = run

    return run
def _create_sensor_run(
    instance: DagsterInstance,
    code_location: CodeLocation,
    remote_sensor: RemoteSensor,
    remote_job: RemoteJob,
    run_id: str,
    run_request: RunRequest,
    target_data: TargetSnap,
) -> DagsterRun:
    """Create (but do not launch) a NOT_STARTED run for a sensor run request.

    Computes the execution plan in the user code location, merges job, request,
    and sensor tags, and records a telemetry event for the created run.
    """
    from dagster._daemon.daemon import get_telemetry_daemon_session_id

    remote_execution_plan = code_location.get_execution_plan(
        remote_job,
        run_request.run_config,
        step_keys_to_execute=None,
        known_state=None,
        instance=instance,
    )
    execution_plan_snapshot = remote_execution_plan.execution_plan_snapshot

    # Later entries win on key collision: request tags override job tags, and
    # the sensor tags override both.
    tags = {
        **(remote_job.run_tags or {}),
        **run_request.tags,
        # this gets applied in the sensor definition too, but we apply it here for backcompat
        # with sensors before the tag was added to the sensor definition
        **DagsterRun.tags_for_sensor(remote_sensor),
    }
    if run_request.run_key:
        tags[RUN_KEY_TAG] = run_request.run_key

    log_action(
        instance,
        SENSOR_RUN_CREATED,
        metadata={
            "DAEMON_SESSION_ID": get_telemetry_daemon_session_id(),
            "SENSOR_NAME_HASH": hash_name(remote_sensor.name),
            "pipeline_name_hash": hash_name(remote_job.name),
            "repo_hash": hash_name(code_location.name),
        },
    )

    return instance.create_run(
        job_name=target_data.job_name,
        run_id=run_id,
        run_config=run_request.run_config,
        resolved_op_selection=remote_job.resolved_op_selection,
        step_keys_to_execute=None,
        status=DagsterRunStatus.NOT_STARTED,
        op_selection=target_data.op_selection,
        root_run_id=None,
        parent_run_id=None,
        tags=tags,
        job_snapshot=remote_job.job_snapshot,
        execution_plan_snapshot=execution_plan_snapshot,
        parent_job_snapshot=remote_job.parent_job_snapshot,
        remote_job_origin=remote_job.get_remote_origin(),
        job_code_origin=remote_job.get_python_origin(),
        asset_selection=(
            frozenset(run_request.asset_selection)
            if run_request.asset_selection is not None
            else None
        ),
        asset_check_selection=(
            frozenset(run_request.asset_check_keys)
            if run_request.asset_check_keys is not None
            else None
        ),
        asset_graph=code_location.get_repository(
            remote_job.repository_handle.repository_name
        ).asset_graph,
    )
| SubmitRunRequestResult |
python | streamlit__streamlit | lib/streamlit/elements/lib/form_utils.py | {
"start": 873,
"end": 2587
} | class ____(NamedTuple):
"""Form data stored on a DeltaGenerator."""
# The form's unique ID.
form_id: str
def _current_form(this_dg: DeltaGenerator) -> FormData | None:
    """Find the FormData for the given DeltaGenerator.

    Forms are blocks, and can have other blocks nested inside them.
    To find the current form, we walk up the dg_stack until we find
    a DeltaGenerator that has FormData.

    Returns None when there is no Streamlit runtime or when *this_dg*
    is not inside any form.
    """
    if not runtime.exists():
        # Outside a running Streamlit app there can be no active form.
        return None

    if this_dg._form_data is not None:
        return this_dg._form_data

    if this_dg == this_dg._main_dg:
        # We were created via an `st.foo` call.
        # Walk up the dg_stack to see if we're nested inside a `with st.form` statement.
        for dg in reversed(context_dg_stack.get()):
            if dg._form_data is not None:
                return dg._form_data
    else:
        # We were created via an `dg.foo` call.
        # Take a look at our parent's form data to see if we're nested inside a form.
        parent = this_dg._parent
        if parent is not None and parent._form_data is not None:
            return parent._form_data

    return None
def current_form_id(dg: DeltaGenerator) -> str:
    """Return the enclosing form's id, or "" if *dg* is not inside an `st.form` block.

    (We return the empty string, instead of None, because this value is
    assigned to protobuf message fields, and None is not valid.)
    """
    data = _current_form(dg)
    return data.form_id if data is not None else ""
def is_in_form(dg: DeltaGenerator) -> bool:
    """True if the DeltaGenerator is inside an st.form block."""
    # A non-empty form id means an enclosing form was found.
    return bool(current_form_id(dg))
| FormData |
python | apache__airflow | airflow-core/src/airflow/dag_processing/collection.py | {
"start": 26919,
"end": 43390
class ____(NamedTuple):
    """Collect asset/alias objects from DAGs and perform database operations for them."""

    # Assets referenced by each DAG's schedule, keyed by dag_id.
    schedule_asset_references: dict[str, list[Asset]]
    # Asset aliases referenced by each DAG's schedule, keyed by dag_id.
    schedule_asset_alias_references: dict[str, list[AssetAlias]]
    schedule_asset_name_references: set[tuple[str, str]]  # dag_id, ref_name.
    schedule_asset_uri_references: set[tuple[str, str]]  # dag_id, ref_uri.
    # Task-level asset references per dag_id; each entry is (task_id, asset).
    inlet_references: dict[str, list[tuple[str, Asset]]]
    outlet_references: dict[str, list[tuple[str, Asset]]]
    # All collected assets keyed by (name, uri).
    assets: dict[tuple[str, str], Asset]
    # All collected aliases keyed by name.
    asset_aliases: dict[str, AssetAlias]

    @classmethod
    def collect(cls, dags: dict[str, LazyDeserializedDAG]) -> Self:
        """Build the collection by walking every DAG's timetable, inlets, and outlets."""
        coll = cls(
            schedule_asset_references={
                dag_id: [asset for _, asset in dag.timetable.asset_condition.iter_assets()]
                for dag_id, dag in dags.items()
            },
            schedule_asset_alias_references={
                dag_id: [alias for _, alias in dag.timetable.asset_condition.iter_asset_aliases()]
                for dag_id, dag in dags.items()
            },
            schedule_asset_name_references={
                (dag_id, ref.name)
                for dag_id, dag in dags.items()
                for ref in dag.timetable.asset_condition.iter_asset_refs()
                if isinstance(ref, AssetNameRef)
            },
            schedule_asset_uri_references={
                (dag_id, ref.uri)
                for dag_id, dag in dags.items()
                for ref in dag.timetable.asset_condition.iter_asset_refs()
                if isinstance(ref, AssetUriRef)
            },
            inlet_references={
                dag_id: list(_get_dag_assets(dag, Asset, inlets=True, outlets=False))
                for dag_id, dag in dags.items()
            },
            outlet_references={
                dag_id: list(_get_dag_assets(dag, Asset, inlets=False, outlets=True))
                for dag_id, dag in dags.items()
            },
            assets={(asset.name, asset.uri): asset for asset in _find_all_assets(dags.values())},
            asset_aliases={alias.name: alias for alias in _find_all_asset_aliases(dags.values())},
        )
        return coll

    def sync_assets(self, *, session: Session) -> dict[tuple[str, str], AssetModel]:
        """Upsert the collected assets into the DB; return ORM models keyed by (name, uri)."""
        # Optimization: skip all database calls if no assets were collected.
        if not self.assets:
            return {}
        orm_assets: dict[tuple[str, str], AssetModel] = {
            (am.name, am.uri): am
            for am in session.scalars(
                select(AssetModel).where(tuple_(AssetModel.name, AssetModel.uri).in_(self.assets))
            )
        }
        # Refresh mutable attributes on assets that already exist in the DB.
        for key, model in orm_assets.items():
            asset = self.assets[key]
            model.group = asset.group
            model.extra = asset.extra
        orm_assets.update(
            ((model.name, model.uri), model)
            for model in asset_manager.create_assets(
                [asset for name_uri, asset in self.assets.items() if name_uri not in orm_assets],
                session=session,
            )
        )
        return orm_assets

    def sync_asset_aliases(self, *, session: Session) -> dict[str, AssetAliasModel]:
        """Upsert the collected asset aliases into the DB; return ORM models keyed by name."""
        # Optimization: skip all database calls if no asset aliases were collected.
        if not self.asset_aliases:
            return {}
        orm_aliases: dict[str, AssetAliasModel] = {
            da.name: da
            for da in session.scalars(
                select(AssetAliasModel).where(AssetAliasModel.name.in_(self.asset_aliases))
            )
        }
        # Refresh the group on aliases that already exist in the DB.
        for name, model in orm_aliases.items():
            model.group = self.asset_aliases[name].group
        orm_aliases.update(
            (model.name, model)
            for model in asset_manager.create_asset_aliases(
                [alias for name, alias in self.asset_aliases.items() if name not in orm_aliases],
                session=session,
            )
        )
        return orm_aliases

    def activate_assets_if_possible(self, models: Iterable[AssetModel], *, session: Session) -> None:
        """
        Try to activate assets eagerly.

        This inserts a record to AssetActive for an asset so it is activated
        on creation if its ``name`` and ``uri`` values do not conflict with
        anything else. This is a best-effort operation; we simply give up if
        there's a conflict. The scheduler makes a more comprehensive pass
        through all assets in ``_update_asset_orphanage``.
        """
        if (dialect_name := get_dialect_name(session)) == "postgresql":
            from sqlalchemy.dialects.postgresql import insert as postgresql_insert

            stmt: Any = postgresql_insert(AssetActive).on_conflict_do_nothing()
        elif session.bind is not None and dialect_name == "mysql":
            from sqlalchemy.dialects.mysql import insert as mysql_insert

            # MySQL does not support "do nothing"; this updates the row in
            # conflict with its own value to achieve the same idea.
            stmt = mysql_insert(AssetActive).on_duplicate_key_update(name=AssetActive.name)
        else:
            from sqlalchemy.dialects.sqlite import insert as sqlite_insert

            stmt = sqlite_insert(AssetActive).on_conflict_do_nothing()
        if values := [{"name": m.name, "uri": m.uri} for m in models]:
            session.execute(stmt, values)

    def add_dag_asset_references(
        self,
        dags: dict[str, DagModel],
        assets: dict[tuple[str, str], AssetModel],
        *,
        session: Session,
    ) -> None:
        """Reconcile DagScheduleAssetReference rows with each DAG's schedule assets."""
        # Optimization: No assets means there are no references to update.
        if not assets:
            return
        for dag_id, references in self.schedule_asset_references.items():
            # Optimization: no references at all; this is faster than repeated delete().
            if not references:
                dags[dag_id].schedule_asset_references = []
                continue
            referenced_asset_ids = {asset.id for asset in (assets[r.name, r.uri] for r in references)}
            orm_refs = {r.asset_id: r for r in dags[dag_id].schedule_asset_references}
            # Drop DB references no longer present in the DAG definition.
            for asset_id, ref in orm_refs.items():
                if asset_id not in referenced_asset_ids:
                    session.delete(ref)
            session.bulk_save_objects(
                DagScheduleAssetReference(asset_id=asset_id, dag_id=dag_id)
                for asset_id in referenced_asset_ids
                if asset_id not in orm_refs
            )

    def add_dag_asset_alias_references(
        self,
        dags: dict[str, DagModel],
        aliases: dict[str, AssetAliasModel],
        *,
        session: Session,
    ) -> None:
        """Reconcile DagScheduleAssetAliasReference rows with each DAG's schedule aliases."""
        # Optimization: No aliases means there are no references to update.
        if not aliases:
            return
        for dag_id, references in self.schedule_asset_alias_references.items():
            # Optimization: no references at all; this is faster than repeated delete().
            if not references:
                dags[dag_id].schedule_asset_alias_references = []
                continue
            referenced_alias_ids = {alias.id for alias in (aliases[r.name] for r in references)}
            orm_refs = {a.alias_id: a for a in dags[dag_id].schedule_asset_alias_references}
            # Drop DB references no longer present in the DAG definition.
            for alias_id, ref in orm_refs.items():
                if alias_id not in referenced_alias_ids:
                    session.delete(ref)
            session.bulk_save_objects(
                DagScheduleAssetAliasReference(alias_id=alias_id, dag_id=dag_id)
                for alias_id in referenced_alias_ids
                if alias_id not in orm_refs
            )

    @staticmethod
    def _add_dag_asset_references(
        references: set[tuple[str, str]],
        model: type[DagScheduleAssetNameReference] | type[DagScheduleAssetUriReference],
        attr: str,
        *,
        session: Session,
    ) -> None:
        """Diff (dag_id, ref) pairs against the DB and apply only the delta."""
        if not references:
            return
        orm_refs = {
            tuple(row)
            for row in session.execute(
                select(model.dag_id, getattr(model, attr)).where(
                    model.dag_id.in_(dag_id for dag_id, _ in references)
                )
            )
        }
        new_refs = references - orm_refs
        old_refs = orm_refs - references
        if old_refs:
            session.execute(delete(model).where(tuple_(model.dag_id, getattr(model, attr)).in_(old_refs)))
        if new_refs:
            session.execute(insert(model), [{"dag_id": d, attr: r} for d, r in new_refs])

    def add_dag_asset_name_uri_references(self, *, session: Session) -> None:
        """Sync name-based and uri-based schedule references for all collected DAGs."""
        self._add_dag_asset_references(
            self.schedule_asset_name_references,
            DagScheduleAssetNameReference,
            "name",
            session=session,
        )
        self._add_dag_asset_references(
            self.schedule_asset_uri_references,
            DagScheduleAssetUriReference,
            "uri",
            session=session,
        )

    def add_task_asset_references(
        self,
        dags: dict[str, DagModel],
        assets: dict[tuple[str, str], AssetModel],
        *,
        session: Session,
    ) -> None:
        """Reconcile task inlet/outlet asset reference rows with the DAG definitions."""
        # Optimization: No assets means there are no references to update.
        if not assets:
            return
        for dag_id, references in self.inlet_references.items():
            # Optimization: no references at all; this is faster than repeated delete().
            if not references:
                dags[dag_id].task_inlet_asset_references = []
                continue
            referenced_inlets = {
                (task_id, asset.id)
                for task_id, asset in ((task_id, assets[d.name, d.uri]) for task_id, d in references)
            }
            orm_refs = {(r.task_id, r.asset_id): r for r in dags[dag_id].task_inlet_asset_references}
            for key, ref in orm_refs.items():
                if key not in referenced_inlets:
                    session.delete(ref)
            session.bulk_save_objects(
                TaskInletAssetReference(asset_id=asset_id, dag_id=dag_id, task_id=task_id)
                for task_id, asset_id in referenced_inlets
                if (task_id, asset_id) not in orm_refs
            )
        for dag_id, references in self.outlet_references.items():
            # Optimization: no references at all; this is faster than repeated delete().
            if not references:
                dags[dag_id].task_outlet_asset_references = []
                continue
            referenced_outlets = {
                (task_id, asset.id)
                for task_id, asset in ((task_id, assets[d.name, d.uri]) for task_id, d in references)
            }
            orm_refs = {(r.task_id, r.asset_id): r for r in dags[dag_id].task_outlet_asset_references}
            for key, ref in orm_refs.items():
                if key not in referenced_outlets:
                    session.delete(ref)
            session.bulk_save_objects(
                TaskOutletAssetReference(asset_id=asset_id, dag_id=dag_id, task_id=task_id)
                for task_id, asset_id in referenced_outlets
                if (task_id, asset_id) not in orm_refs
            )

    def add_asset_trigger_references(
        self, assets: dict[tuple[str, str], AssetModel], *, session: Session
    ) -> None:
        """Sync Trigger rows and asset-to-trigger links to match each asset's watchers."""
        from airflow.serialization.serialized_objects import _encode_trigger

        # Update references from assets being used
        refs_to_add: dict[tuple[str, str], set[int]] = {}
        refs_to_remove: dict[tuple[str, str], set[int]] = {}
        triggers: dict[int, dict] = {}

        # Optimization: if no asset collected, skip fetching active assets
        active_assets = _find_active_assets(self.assets, session=session) if self.assets else {}

        for name_uri, asset in self.assets.items():
            # If the asset belongs to a DAG not active or paused, consider there is no watcher associated to it
            asset_watcher_triggers = (
                [
                    {**_encode_trigger(watcher.trigger), "watcher_name": watcher.name}
                    for watcher in asset.watchers
                ]
                if name_uri in active_assets
                else []
            )
            trigger_hash_to_trigger_dict: dict[int, dict] = {
                BaseEventTrigger.hash(trigger["classpath"], trigger["kwargs"]): trigger
                for trigger in asset_watcher_triggers
            }
            triggers.update(trigger_hash_to_trigger_dict)
            trigger_hash_from_asset: set[int] = set(trigger_hash_to_trigger_dict.keys())

            asset_model = assets[name_uri]
            trigger_hash_from_asset_model: set[int] = {
                BaseEventTrigger.hash(trigger.classpath, trigger.kwargs) for trigger in asset_model.triggers
            }

            # Optimization: no diff between the DB and DAG definitions, no update needed
            if trigger_hash_from_asset == trigger_hash_from_asset_model:
                continue

            diff_to_add = trigger_hash_from_asset - trigger_hash_from_asset_model
            diff_to_remove = trigger_hash_from_asset_model - trigger_hash_from_asset
            if diff_to_add:
                refs_to_add[name_uri] = diff_to_add
            if diff_to_remove:
                refs_to_remove[name_uri] = diff_to_remove

        if refs_to_add:
            all_trigger_hashes: set[int] = {
                trigger_hash for trigger_hashes in refs_to_add.values() for trigger_hash in trigger_hashes
            }
            all_trigger_keys: set[tuple[str, str]] = {
                (
                    triggers[trigger_hash]["classpath"],
                    Trigger.encrypt_kwargs(triggers[trigger_hash]["kwargs"]),
                )
                for trigger_hashes in refs_to_add.values()
                for trigger_hash in trigger_hashes
            }
            orm_triggers: dict[int, Trigger] = {
                BaseEventTrigger.hash(trigger.classpath, trigger.kwargs): trigger
                for trigger in session.scalars(
                    select(Trigger).where(
                        tuple_(Trigger.classpath, Trigger.encrypted_kwargs).in_(all_trigger_keys)
                    )
                )
            }

            # Create new triggers
            new_trigger_models = [
                trigger
                for trigger in [
                    Trigger(
                        classpath=triggers[trigger_hash]["classpath"], kwargs=triggers[trigger_hash]["kwargs"]
                    )
                    for trigger_hash in all_trigger_hashes
                    if trigger_hash not in orm_triggers
                ]
            ]
            session.add_all(new_trigger_models)
            session.flush()  # Flush to get the IDs assigned
            orm_triggers.update(
                (BaseEventTrigger.hash(trigger.classpath, trigger.kwargs), trigger)
                for trigger in new_trigger_models
            )

            # Add new references
            for name_uri, trigger_hashes in refs_to_add.items():
                asset_model = assets[name_uri]
                for trigger_hash in trigger_hashes:
                    trigger = triggers.get(trigger_hash)
                    orm_trigger = orm_triggers.get(trigger_hash)
                    if orm_trigger and trigger:
                        asset_model.add_trigger(orm_trigger, trigger["watcher_name"])

        if refs_to_remove:
            # Remove old references
            for name_uri, trigger_hashes in refs_to_remove.items():
                asset_model = assets[name_uri]
                asset_model.watchers = [
                    watcher
                    for watcher in asset_model.watchers
                    if BaseEventTrigger.hash(watcher.trigger.classpath, watcher.trigger.kwargs)
                    not in trigger_hashes
                ]

        # Remove references from assets no longer used
        orphan_assets = session.scalars(
            select(AssetModel).filter(~AssetModel.scheduled_dags.any()).filter(AssetModel.triggers.any())
        )
        for asset_model in orphan_assets:
            if (asset_model.name, asset_model.uri) not in self.assets:
                # Delete all watchers for this orphaned asset
                asset_model.watchers = []
| AssetModelOperation |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/sub_question_query_engine.py | {
"start": 1121,
"end": 1382
class ____(BaseModel):
    """
    Pair of the sub question and optionally its answer (if it has been answered yet).
    """

    # The generated sub question.
    sub_q: SubQuestion
    # Synthesized answer text; None until the sub question has been answered.
    answer: Optional[str] = None
    # Source nodes that contributed to the answer.
    sources: List[NodeWithScore] = Field(default_factory=list)
| SubQuestionAnswerPair |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/dep_snapshot.py | {
"start": 7744,
"end": 9219
class ____(
    NamedTuple(
        "_NodeInvocationSnap",
        [
            ("node_name", str),
            ("node_def_name", str),
            ("tags", Mapping[str, str]),
            ("input_dep_snaps", Sequence[InputDependencySnap]),
            ("is_dynamic_mapped", bool),
        ],
    )
):
    """Serializable record of one node invocation: the invocation name, the
    node definition it invokes, its tags, and its input dependencies.

    ``__new__`` validates every field with dagster's ``check`` utilities.
    """

    def __new__(
        cls,
        node_name: str,
        node_def_name: str,
        tags: Mapping[str, str],
        input_dep_snaps: Sequence[InputDependencySnap],
        is_dynamic_mapped: bool = False,
    ):
        return super().__new__(
            cls,
            node_name=check.str_param(node_name, "node_name"),
            node_def_name=check.str_param(node_def_name, "node_def_name"),
            tags=check.mapping_param(tags, "tags", key_type=str, value_type=str),
            input_dep_snaps=check.sequence_param(
                input_dep_snaps, "input_dep_snaps", of_type=InputDependencySnap
            ),
            is_dynamic_mapped=check.bool_param(is_dynamic_mapped, "is_dynamic_mapped"),
        )

    @cached_property
    def input_dep_map(self) -> Mapping[str, InputDependencySnap]:
        """Lazily built map of input name -> InputDependencySnap."""
        return {inp_snap.input_name: inp_snap for inp_snap in self.input_dep_snaps}

    def input_dep_snap(self, input_name: str) -> InputDependencySnap:
        """Return the dependency snap for *input_name*, failing if it is absent."""
        inp_snap = self.input_dep_map.get(input_name)
        if inp_snap:
            return inp_snap
        check.failed(f"Input {input_name} not found for node {self.node_name}")
| NodeInvocationSnap |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip.py | {
"start": 18124,
"end": 18931
class ____(PreTrainedModel):
    """Base class for BLIP models: shared config wiring and weight initialization."""

    config: BlipConfig
    base_model_prefix = "blip"
    input_modalities = ("image", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["BlipEncoderLayer", "BlipTextEmbeddings"]
    _skip_keys_device_placement = ["past_key_values"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        super()._init_weights(module)
        std = self.config.initializer_range
        if isinstance(module, BlipVisionEmbeddings):
            # Vision embeddings use the vision sub-config's initializer range when present.
            if hasattr(self.config, "vision_config"):
                std = self.config.vision_config.initializer_range
            init.trunc_normal_(module.position_embedding, mean=0.0, std=std)
            init.trunc_normal_(module.class_embedding, mean=0.0, std=std)
| BlipPreTrainedModel |
python | google__jax | tests/config_test.py | {
"start": 1236,
"end": 4560
class ____(jtu.JaxTestCase):
  """Tests for config options: update(), context-manager use, and validation."""

  @parameterized.named_parameters(
      {"testcase_name": "_enum", "config_name": "jax_test_enum_config",
       "config_obj": jax_test_enum_config, "default": "default", "val1": "xxx",
       "val2": "yyy"},
      {"testcase_name": "_bool", "config_name": "jax_test_bool_config",
       "config_obj": jax_test_bool_config, "default": True, "val1": False,
       "val2": True},
  )
  def test_config_setting_via_update(self, config_name, config_obj, default, val1, val2):
    # update() takes effect immediately and can restore the default.
    self.assertEqual(config_obj.value, default)
    jax.config.update(config_name, val1)
    self.assertEqual(config_obj.value, val1)
    jax.config.update(config_name, val2)
    self.assertEqual(config_obj.value, val2)
    jax.config.update(config_name, default)
    self.assertEqual(config_obj.value, default)

  @parameterized.named_parameters(
      {"testcase_name": "_enum", "config_obj": jax_test_enum_config,
       "default": "default", "val1": "xxx", "val2": "yyy"},
      {"testcase_name": "_bool", "config_obj": jax_test_bool_config,
       "default": True, "val1": False, "val2": True},
  )
  def test_config_setting_via_context(self, config_obj, default, val1, val2):
    # Nested contexts restore the previous value on exit, innermost first.
    self.assertEqual(config_obj.value, default)
    with config_obj(val1):
      self.assertEqual(config_obj.value, val1)
      with config_obj(val2):
        self.assertEqual(config_obj.value, val2)
      self.assertEqual(config_obj.value, val1)
    self.assertEqual(config_obj.value, default)

  def test_config_update_validation(self):
    self.assertEqual(jax_test_enum_config.value, 'default')
    with self.assertRaisesRegex(ValueError, 'new enum value must be in.*'):
      jax.config.update('jax_test_enum_config', 'invalid')
    # Error should raise before changing the value
    self.assertEqual(jax_test_enum_config.value, 'default')

  def test_config_context_validation(self):
    self.assertEqual(jax_test_enum_config.value, 'default')
    with self.assertRaisesRegex(ValueError, 'new enum value must be in.*'):
      with jax_test_enum_config('invalid'):
        pass
    self.assertEqual(jax_test_enum_config.value, 'default')

  def test_bool_config_update_validation(self):
    self.assertEqual(jax_test_bool_config.value, True)
    with self.assertRaisesRegex(ValueError, "invalid bool"):
      jax.config.update('jax_test_bool_config', InvalidBool())
    # Error should raise before changing the value
    self.assertEqual(jax_test_bool_config.value, True)

  def test_bool_config_context_validation(self):
    self.assertEqual(jax_test_bool_config.value, True)
    with self.assertRaisesRegex(ValueError, "invalid bool"):
      with jax_test_bool_config(InvalidBool()):
        pass
    self.assertEqual(jax_test_bool_config.value, True)

  def test_cloud_tpu_init(self):
    if not jtu.is_cloud_tpu():
      self.skipTest('Not running on a Cloud TPU VM.')
    # Context manager resets the jax_platforms config to its original value.
    with jtu.global_config_context(jax_platforms=None):
      cloud_tpu_init()
      self.assertEqual(config.jax_platforms.value, 'tpu,cpu')
    # An explicitly configured platform list is left untouched.
    with jtu.global_config_context(jax_platforms='platform_A'):
      cloud_tpu_init()
      self.assertEqual(config.jax_platforms.value, 'platform_A')
if __name__ == '__main__':
  # Run via JAX's test loader so its test filtering/sharding hooks apply.
  absltest.main(testLoader=jtu.JaxTestLoader())
| ConfigTest |
python | urllib3__urllib3 | test/conftest.py | {
"start": 11193,
"end": 12560
class ____:
    """Connection class that refuses to be instantiated.

    Swapped in for the plain-HTTP connection classes while HTTP/2 support is
    injected, so a plain-HTTP test accidentally using `http_version` fails fast.
    """

    def __init__(self, *args: typing.Any, **kwargs: typing.Any):
        raise ValueError(
            "HTTP/2 support currently only applies to HTTPS, don't use http_version for HTTP tests"
        )
@pytest.fixture(params=["h11", "h2"])
def http_version(request: pytest.FixtureRequest) -> typing.Generator[str]:
    """Parametrize a test over HTTP/1.1 ("h11") and HTTP/2 ("h2").

    For "h2", injects urllib3's HTTP/2 support and replaces the plain-HTTP
    connection classes with ErroringHTTPConnection; the originals are restored
    after the test.
    """
    orig_HTTPConnection: typing.Any = None
    if request.param == "h2":
        urllib3.http2.inject_into_urllib3()

        from urllib3 import connection as urllib3_connection
        from urllib3.connectionpool import HTTPConnectionPool

        orig_HTTPConnection = urllib3_connection.HTTPConnection
        urllib3_connection.HTTPConnection = ErroringHTTPConnection  # type: ignore[misc,assignment]
        HTTPConnectionPool.ConnectionCls = ErroringHTTPConnection  # type: ignore[assignment]

    try:
        yield request.param
    finally:
        # Undo the h2 injection even if the test failed.
        if request.param == "h2":
            urllib3_connection.HTTPConnection = orig_HTTPConnection  # type: ignore[misc]
            HTTPConnectionPool.ConnectionCls = orig_HTTPConnection
            urllib3.http2.extract_from_urllib3()
@pytest.fixture(autouse=True, scope="function")
def reset_http2_probe_cache() -> typing.Generator[None]:
    """Autouse fixture: clear HTTP/2 probe state after every test."""
    # Always reset the HTTP/2 probe cache per test case.
    try:
        yield
    finally:
        http2_probe._reset()
| ErroringHTTPConnection |
python | django__django | tests/template_tests/syntax_tests/i18n/test_get_current_language_bidi.py | {
"start": 129,
"end": 607
class ____(SimpleTestCase):
    """Tests for the `{% get_current_language_bidi %}` template tag."""

    libraries = {"i18n": "django.templatetags.i18n"}

    @setup({"template": "{% load i18n %} {% get_current_language_bidi %}"})
    def test_no_as_var(self):
        # The tag requires an `as variable` clause; using it bare must raise.
        msg = (
            "'get_current_language_bidi' requires 'as variable' (got "
            "['get_current_language_bidi'])"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string("template")
python | getsentry__sentry | tests/snuba/search/test_backend.py | {
"start": 3914,
"end": 11342
} | class ____(SharedSnubaMixin):
@property
def backend(self):
return EventsDatasetSnubaSearchBackend()
def setUp(self) -> None:
super().setUp()
self.base_datetime = before_now(days=3).replace(microsecond=0)
event1_timestamp = (self.base_datetime - timedelta(days=21)).isoformat()
self.event1 = self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"event_id": "a" * 32,
"message": "foo. Indeed, this message is intended to be greater than 256 characters such that we can put this unique string identifier after that point in the string. The purpose of this is in order to verify we are using snuba to search messages instead of Postgres (postgres truncates at 256 characters and clickhouse does not). santryrox.",
"environment": "production",
"tags": {"server": "example.com", "sentry:user": "event1@example.com"},
"timestamp": event1_timestamp,
"stacktrace": {"frames": [{"module": "group1"}]},
"level": "fatal",
},
project_id=self.project.id,
)
self.event3 = self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"event_id": "c" * 32,
"message": "group1",
"environment": "production",
"tags": {"server": "example.com", "sentry:user": "event3@example.com"},
"timestamp": self.base_datetime.isoformat(),
"stacktrace": {"frames": [{"module": "group1"}]},
"level": "fatal",
},
project_id=self.project.id,
)
self.group1 = Group.objects.get(id=self.event1.group.id)
assert self.group1.id == self.event1.group.id
assert self.group1.id == self.event3.group.id
assert self.group1.first_seen == self.event1.datetime
assert self.group1.last_seen == self.event3.datetime
self.group1.times_seen = 5
self.group1.status = GroupStatus.UNRESOLVED
self.group1.substatus = GroupSubStatus.ONGOING
self.group1.priority = PriorityLevel.HIGH
self.group1.update(type=ErrorGroupType.type_id)
self.group1.seer_fixability_score = FixabilityScoreThresholds.HIGH.value + 0.01
self.group1.seer_autofix_last_triggered = self.event3.datetime
self.group1.save()
self.store_group(self.group1)
self.event2 = self.store_event(
data={
"fingerprint": ["put-me-in-group2"],
"event_id": "b" * 32,
"timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
"message": "bar",
"stacktrace": {"frames": [{"module": "group2"}]},
"environment": "staging",
"tags": {
"server": "example.com",
"url": "http://example.com",
"sentry:user": "event2@example.com",
},
"level": "error",
},
project_id=self.project.id,
)
self.group2 = Group.objects.get(id=self.event2.group.id)
assert self.group2.id == self.event2.group.id
assert self.group2.first_seen == self.group2.last_seen == self.event2.datetime
self.group2.status = GroupStatus.RESOLVED
self.group2.substatus = None
self.group2.times_seen = 10
self.group2.update(type=ErrorGroupType.type_id)
self.group2.priority = PriorityLevel.HIGH
self.group2.seer_fixability_score = FixabilityScoreThresholds.MEDIUM.value
self.group2.seer_autofix_last_triggered = self.event2.datetime
self.group2.save()
self.store_group(self.group2)
GroupBookmark.objects.create(
user_id=self.user.id, group=self.group2, project=self.group2.project
)
GroupAssignee.objects.create(
user_id=self.user.id, group=self.group2, project=self.group2.project
)
GroupSubscription.objects.create(
user_id=self.user.id, group=self.group1, project=self.group1.project, is_active=True
)
GroupSubscription.objects.create(
user_id=self.user.id, group=self.group2, project=self.group2.project, is_active=False
)
self.environments = {
"production": self.event1.get_environment(),
"staging": self.event2.get_environment(),
}
def set_up_multi_project(self):
self.project2 = self.create_project(organization=self.project.organization)
self.event_p2 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["put-me-in-groupP2"],
"timestamp": (self.base_datetime - timedelta(days=21)).isoformat(),
"message": "foo",
"stacktrace": {"frames": [{"module": "group_p2"}]},
"tags": {"server": "example.com"},
"environment": "production",
},
project_id=self.project2.id,
)
self.group_p2 = Group.objects.get(id=self.event_p2.group.id)
self.group_p2.times_seen = 6
self.group_p2.last_seen = self.base_datetime - timedelta(days=1)
self.group_p2.save()
self.store_group(self.group_p2)
def create_group_with_integration_external_issue(self, environment="production"):
event = self.store_event(
data={
"fingerprint": ["linked_group1"],
"event_id": uuid.uuid4().hex,
"timestamp": self.base_datetime.isoformat(),
"environment": environment,
},
project_id=self.project.id,
)
integration, _ = self.create_provider_integration_for(
event.group.organization, self.user, provider="example", name="Example"
)
self.create_integration_external_issue(
group=event.group,
integration=integration,
key="APP-123",
)
return event.group
def create_group_with_platform_external_issue(self, environment="production"):
event = self.store_event(
data={
"fingerprint": ["linked_group2"],
"event_id": uuid.uuid4().hex,
"timestamp": self.base_datetime.isoformat(),
"environment": environment,
},
project_id=self.project.id,
)
self.create_platform_external_issue(
group=event.group,
service_type="sentry-app",
display_name="App#issue-1",
web_url="https://example.com/app/issues/1",
)
return event.group
def run_test_query(
self, query, expected_groups, expected_negative_groups=None, environments=None, user=None
):
results = self.make_query(search_filter_query=query, environments=environments, user=user)
def sort_key(result):
return result.id
assert sorted(results, key=sort_key) == sorted(expected_groups, key=sort_key)
if expected_negative_groups is not None:
results = self.make_query(search_filter_query=f"!{query}", user=user)
assert sorted(results, key=sort_key) == sorted(expected_negative_groups, key=sort_key)
| EventsDatasetTestSetup |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/compute.py | {
"start": 60867,
"end": 68250
} | class ____(ComputeEngineBaseOperator):
"""
Creates an Instance Group Managers using the body specified.
After the group is created, instances in the group are created using the specified Instance Template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceGroupManagerOperator`
:param body: Instance Group Managers representation as object.
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new Instance Group Managers again)
It should be in UUID format as defined in RFC 4122
:param resource_id: Name of the Instance Group Managers. If the name of Instance Group Managers is
not specified in body['name'], the name will be taken from 'resource_id' parameter.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceGroupManagerDetailsLink(),)
# [START gce_igm_insert_fields]
template_fields: Sequence[str] = (
"project_id",
"body",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
"resource_id",
)
# [END gce_igm_insert_fields]
def __init__(
self,
*,
body: dict,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
resource_id: str | None = None,
request_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version="v1",
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
validate_body: bool = True,
**kwargs,
) -> None:
self.body = body
self.request_id = request_id
if "name" in body:
resource_id = self.body["name"]
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def check_body_fields(self) -> None:
required_params = ["base_instance_name", "target_size", "instance_template"]
for param in required_params:
if param not in self.body:
readable_param = param.replace("_", " ")
raise AirflowException(
f"The body '{self.body}' should contain at least {readable_param} for the new operator "
f"in the '{param}' field. Check (google.cloud.compute_v1.types.Instance) "
f"for more details about body fields description."
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
self.check_body_fields()
try:
# Idempotence check (sort of) - we want to check if the new Instance Group Manager
# is already created and if isn't, we create new one
existing_instance_group_manager = hook.get_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
except exceptions.NotFound as e:
# We actually expect to get 404 / Not Found here as the Instance Group Manager should
# not yet exist
if e.code != 404:
raise e
else:
self.log.info("The %s Instance Group Manager already exists", existing_instance_group_manager)
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return InstanceGroupManager.to_dict(existing_instance_group_manager)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance Group Manager with specified body: %s", self.body)
hook.insert_instance_group_manager(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
zone=self.zone,
)
self.log.info("The specified Instance Group Manager has been created SUCCESSFULLY", self.body)
new_instance_group_manager = hook.get_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return InstanceGroupManager.to_dict(new_instance_group_manager)
| ComputeEngineInsertInstanceGroupManagerOperator |
python | sympy__sympy | sympy/printing/llvmjitcode.py | {
"start": 10958,
"end": 12423
} | class ____(LLVMJitCode):
def __init__(self, signature):
super().__init__(signature)
def _create_param_dict(self, func_args):
for i, a in enumerate(func_args):
if isinstance(a, IndexedBase):
self.param_dict[a] = (self.fn.args[i], i)
self.fn.args[i].name = str(a)
else:
self.param_dict[a] = (self.fn.args[self.signature.input_arg],
i)
def _create_function(self, expr):
"""Create function body and return LLVM IR"""
bb_entry = self.fn.append_basic_block('entry')
builder = ll.IRBuilder(bb_entry)
lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,
func_arg_map=self.param_dict)
ret = self._convert_expr(lj, expr)
if self.signature.ret_arg:
output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],
ll.PointerType(self.fp_type))
for i, val in enumerate(ret):
index = ll.Constant(ll.IntType(32), i)
output_array_ptr = builder.gep(output_fp_ptr, [index])
builder.store(val, output_array_ptr)
builder.ret(ll.Constant(ll.IntType(32), 0)) # return success
else:
lj.builder.ret(self._wrap_return(lj, ret))
strmod = str(self.module)
return strmod
| LLVMJitCodeCallback |
python | doocs__leetcode | solution/2000-2099/2031.Count Subarrays With More Ones Than Zeros/Solution2.py | {
"start": 0,
"end": 308
} | class ____:
def subarraysWithMoreZerosThanOnes(self, nums: List[int]) -> int:
sl = SortedList([0])
mod = 10**9 + 7
ans = s = 0
for x in nums:
s += x or -1
ans += sl.bisect_left(s)
ans %= mod
sl.add(s)
return ans
| Solution |
python | walkccc__LeetCode | solutions/406. Queue Reconstruction by Height/406.py | {
"start": 0,
"end": 228
} | class ____:
def reconstructQueue(self, people: list[list[int]]) -> list[list[int]]:
ans = []
people.sort(key=lambda x: (-x[0], x[1]))
for person in people:
ans.insert(person[1], person)
return ans
| Solution |
python | lepture__authlib | authlib/oidc/core/errors.py | {
"start": 514,
"end": 953
} | class ____(OAuth2Error):
"""The Authorization Server requires End-User authentication. This error
MAY be returned when the prompt parameter value in the Authentication
Request is none, but the Authentication Request cannot be completed
without displaying a user interface for End-User authentication.
http://openid.net/specs/openid-connect-core-1_0.html#AuthError
"""
error = "login_required"
| LoginRequiredError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 917428,
"end": 920096
} | class ____(sgqlc.types.Type):
"""A ref update rules for a viewer."""
__schema__ = github_schema
__field_names__ = (
"allows_deletions",
"allows_force_pushes",
"blocks_creations",
"pattern",
"required_approving_review_count",
"required_status_check_contexts",
"requires_code_owner_reviews",
"requires_conversation_resolution",
"requires_linear_history",
"requires_signatures",
"viewer_allowed_to_dismiss_reviews",
"viewer_can_push",
)
allows_deletions = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="allowsDeletions")
"""Can this branch be deleted."""
allows_force_pushes = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="allowsForcePushes")
"""Are force pushes allowed on this branch."""
blocks_creations = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="blocksCreations")
"""Can matching branches be created."""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""Identifies the protection rule pattern."""
required_approving_review_count = sgqlc.types.Field(Int, graphql_name="requiredApprovingReviewCount")
"""Number of approving reviews required to update matching branches."""
required_status_check_contexts = sgqlc.types.Field(sgqlc.types.list_of(String), graphql_name="requiredStatusCheckContexts")
"""List of required status check contexts that must pass for commits
to be accepted to matching branches.
"""
requires_code_owner_reviews = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresCodeOwnerReviews")
"""Are reviews from code owners required to update matching branches."""
requires_conversation_resolution = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresConversationResolution")
"""Are conversations required to be resolved before merging."""
requires_linear_history = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresLinearHistory")
"""Are merge commits prohibited from being pushed to this branch."""
requires_signatures = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="requiresSignatures")
"""Are commits required to be signed."""
viewer_allowed_to_dismiss_reviews = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerAllowedToDismissReviews")
"""Is the viewer allowed to dismiss reviews."""
viewer_can_push = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanPush")
"""Can the viewer push to the branch"""
| RefUpdateRule |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 42380,
"end": 45474
} | class ____(AggregateFnV2[List[int], float]):
"""Calculates the percentage of null values in a column.
This aggregation computes the percentage of null (missing) values in a dataset column.
It treats both None values and NaN values as null. The result is a percentage value
between 0.0 and 100.0, where 0.0 means no missing values and 100.0 means all values
are missing.
Example:
.. testcode::
import ray
from ray.data.aggregate import MissingValuePercentage
# Create a dataset with some missing values
ds = ray.data.from_items([
{"value": 1}, {"value": None}, {"value": 3},
{"value": None}, {"value": 5}
])
# Calculate missing value percentage
result = ds.aggregate(MissingValuePercentage(on="value"))
# result: 40.0 (2 out of 5 values are missing)
# Using with groupby
ds = ray.data.from_items([
{"group": "A", "value": 1}, {"group": "A", "value": None},
{"group": "B", "value": 3}, {"group": "B", "value": None}
])
result = ds.groupby("group").aggregate(MissingValuePercentage(on="value")).take_all()
# result: [{'group': 'A', 'missing_pct(value)': 50.0},
# {'group': 'B', 'missing_pct(value)': 50.0}]
Args:
on: The name of the column to calculate missing value percentage on.
alias_name: Optional name for the resulting column. If not provided,
defaults to "missing_pct({column_name})".
"""
def __init__(
self,
on: str,
alias_name: Optional[str] = None,
):
# Initialize with a list accumulator [null_count, total_count]
super().__init__(
alias_name if alias_name else f"missing_pct({str(on)})",
on=on,
ignore_nulls=False, # Include nulls for this calculation
zero_factory=lambda: [0, 0], # Our AggType is a simple list
)
def aggregate_block(self, block: Block) -> List[int]:
column_accessor = BlockColumnAccessor.for_column(block[self._target_col_name])
total_count = column_accessor.count(ignore_nulls=False)
null_count = pc.sum(
pc.is_null(column_accessor._as_arrow_compatible(), nan_is_null=True)
).as_py()
# Return our accumulator
return [null_count, total_count]
def combine(self, current_accumulator: List[int], new: List[int]) -> List[int]:
# Merge two accumulators by summing their components
assert len(current_accumulator) == len(new) == 2
return [
current_accumulator[0] + new[0], # Sum null counts
current_accumulator[1] + new[1], # Sum total counts
]
def finalize(self, accumulator: List[int]) -> Optional[float]:
# Calculate the final percentage
if accumulator[1] == 0:
return None
return (accumulator[0] / accumulator[1]) * 100.0
@PublicAPI(stability="alpha")
| MissingValuePercentage |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/pfor.py | {
"start": 9195,
"end": 34226
} | class ____:
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(
self,
exit_node: tensor_lib.Tensor,
pfor_ops: List[ops.Operation],
fallback_to_while_loop: bool,
pfor_config: "PForConfig",
):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
fallback_to_while_loop: If True, fallback to while loop when conversion of
an op is not supported
pfor_config: PForConfig object used while constructing loop body.
"""
self._fallback_to_while_loop = fallback_to_while_loop
self._pfor_config = pfor_config
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set(x._id for x in pfor_ops)
assert isinstance(exit_node, tensor_lib.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
# Note the code below tries to reverse engineer an existing while_loop graph
# by assuming the following pattern of nodes.
#
# NextIteration <---- Body <--- Enter
# | ^
# V ___| Y
# Enter -> Merge -> Switch___
# ^ | N
# | V
# LoopCond Exit
# Node that elements in the list below correspond one-to-one with each
# other. i.e. these lists are the same size, and the i_th entry corresponds
# to different Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (tensor_lib.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (tensor_lib.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self) -> str:
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self) -> List[tensor_lib.Tensor]:
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self) -> str:
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self) -> bool:
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op: ops.Operation) -> bool:
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self) -> bool:
return self._is_stateful
@property
def pfor_converter(self) -> "WhileOp":
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked,
fallback_to_while_loop=self._fallback_to_while_loop,
pfor_config=self._pfor_config)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
# Note that second output of Merge is typically not used, except possibly
# as a control dependency. To avoid trying to output the correct value, we
# employ a hack here. We output a dummy invalid value with an incorrect
# dtype. This will allow control dependency to work but if using it as an
# input, it should typically lead to errors during graph construction due
# to dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
# Don't need to worry about switch.output[0] which will feed to Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor: "PFor", enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = []
for x in enter.op.control_inputs:
converted = parent_pfor._convert_helper(x)
if not isinstance(converted, ops.Operation):
converted = converted.t
control_inputs.append(converted)
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
def _maybe_stacked(self, cache, inp):
"""Heuristic to figure out if the converting inp leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
if op.type in [
"OnesLike",
"Shape",
"Rank",
"ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input: "_PforInput"):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
# Since this enter node is part of the `loop_vars`, it corresponds to an
# output and its preceding switch. We mark this switch's output the same
# stackness, to act at the base case for the logic below. Below, we will
# be going through the body figuring out which inputs might need to be
# stacked and which inputs can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
# List of booleans indicating stackness of inputs, i.e. tensors
# corresponding to self._enters.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
# Note that even when an input is unstacked, the body could make it
# stacked. we use a heuristic below to figure out if body may be making
# it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)] + input_shape_invariants +
ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
tf_cond.cond(not_all_done, lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
  def _process_body(
      self,
      pfor_input: "_PforInput",
      inputs_stacked,
      new_indices,
      cond_stacked,
      new_inputs,
      not_all_done,
  ):
    """Convert the body function.

    Builds a converted copy of the original while-loop body via a nested pfor
    instance and guards it with `tf_cond.cond(not_all_done, ...)` so that the
    body ops only run while at least one pfor iteration is still active.

    Args:
      pfor_input: The _PforInput for the Exit node being converted; supplies
        the parent pfor context.
      inputs_stacked: List of booleans, stackness of each loop input.
      new_indices: int32 Tensor of pfor iteration ids still active.
      cond_stacked: Whether the loop condition varies across pfor iterations.
      new_inputs: Loop inputs after partitioning done/active iterations.
      not_all_done: Boolean scalar Tensor; True while any iteration is active.

    Returns:
      List of converted body outputs, one per element of self._body_outputs.
    """
    def true_fn(control_inputs, body_pfor, body_output, stacked):
      """Converts the body function for all but last iteration.

      This essentially converts body_output. Additionally, it needs to handle
      any control dependencies on the NextIteration node. So it creates another
      Identity node with the converted dependencies.
      """
      # Convert the control-dependency ops' outputs so the converted body
      # preserves the original NextIteration ordering constraints.
      converted_control_inp = []
      for x in control_inputs:
        for t in x.outputs:
          converted_control_inp.append(body_pfor._convert_helper(t).t)
      if stacked:
        # Note convert always does the stacking.
        output = body_pfor.convert(body_output)
      else:
        output, convert_stacked, _ = body_pfor._convert_helper(body_output)
        assert convert_stacked == stacked, body_output
      # Identity node carries the converted control dependencies.
      with ops.control_dependencies(converted_control_inp):
        return array_ops.identity(output)

    # Nested pfor instance that converts ops of the original loop body using
    # the partitioned (still-active) inputs.
    body_pfor = self._init_pfor(pfor_input.pfor, new_indices, cond_stacked,
                                new_inputs, inputs_stacked)
    new_outputs = []

    for i, (body_output,
            stacked) in enumerate(zip(self._body_outputs, inputs_stacked)):
      control_inp = self._next_iter_control_inputs[i]
      out_dtype = body_output.dtype
      # Note that we want to run the body only if not all pfor iterations are
      # done. If all are done, we return empty tensors since these values will
      # not be used. Notice that the value returned by the loop is based on
      # TensorArrays and not directly on these returned values.
      # pylint: disable=cell-var-from-loop
      new_output = tf_cond.cond(
          not_all_done,
          lambda: true_fn(control_inp, body_pfor, body_output, stacked),
          lambda: constant_op.constant([], dtype=out_dtype))
      # pylint: enable=cell-var-from-loop
      new_outputs.append(new_output)
    return new_outputs
  def __call__(self, pfor_input: "_PforInput"):
    """Converter for the while_loop.

    The conversion of a while_loop is another while_loop.

    The arguments to this converted while_loop are as follows:
    not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
      are done.
    indices: int32 1-D Tensor storing the id of the iterations that are not
      done.
    args: Remaining arguments. These can be divided into 3 categories:
      - First set of arguments are the tensors that correspond to the initial
        elements of self._enters. The elements that appear in original while
        loop's `loop_vars`.
      - The second set of arguments are the tensors that correspond to the
        remaining elements of self._enters. These are the tensors that directly
        enter the original while loop body.
      - Finally, the last set of arguments are TensorArrays. These TensorArrays
        correspond to the outputs of the original while_loop, i.e. to the
        elements in self._outputs. Each TensorArray has `PFor.loop_len`
        elements, i.e. the number of pfor iterations. At the end, the i'th
        element of each TensorArray will contain the output computed by the
        i'th iteration of pfor. Note that elements can be written into these
        tensors arrays in any order, depending on when the corresponding pfor
        iteration is done.
    If the original while_loop had `k` tensors in its `loop_vars` and its body
    directly captured `m` tensors, the `args` will contain `2 * k + m` values.

    In each iteration, the while_loop body recomputes the condition for all
    active pfor iterations to see which of them are now done. It then partitions
    all the inputs and passes them along to the converted body. Values for all
    the iterations that are done are written to TensorArrays indexed by the pfor
    iteration number. When all iterations are done, the TensorArrays are stacked
    to get the final value.

    Args:
      pfor_input: A PForInput object corresponding to the output of any Exit
        node from this while loop.

    Returns:
      List of converted outputs.
    """
    # Create init_values that will be passed to the while_loop.
    init_values, inputs_stacked, shape_invariants = self._create_init_values(
        pfor_input)
    # Note that we use a list as a hack since we need the nested function body
    # to set the value of cond_is_stacked. python2.x doesn't support nonlocal
    # variables.
    cond_is_stacked = [None]

    def cond(not_all_done, *_):
      # Keep looping while at least one pfor iteration is still active.
      return not_all_done

    def body(not_all_done, indices, *args):
      # See documentation for __call__ for the structure of *args.
      num_enters = len(self._enters)
      inputs = args[:num_enters]
      output_tas = args[num_enters:]
      # TODO(agarwal): see which outputs have consumers and only populate the
      # TensorArrays corresponding to those. Or do those paths get trimmed out
      # from inside the while_loop body?
      assert len(inputs) >= len(output_tas)
      assert len(inputs) == len(inputs_stacked)

      # Convert condition
      with ops.name_scope("while_cond"):
        # Note that we set cond_stacked to True here. At this point we don't
        # know if it could be loop invariant, hence the conservative value is
        # to assume stacked.
        cond_pfor = self._init_pfor(
            pfor_input.pfor,
            indices,
            cond_stacked=True,
            inputs=inputs,
            inputs_stacked=inputs_stacked)
        conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
        cond_is_stacked[0] = cond_stacked

      # Recompute the new condition, write outputs of done iterations, and
      # partition the inputs if needed.
      if not cond_stacked:
        (not_all_done, new_indices, new_inputs,
         new_output_tas) = self._process_cond_unstacked(conditions, indices,
                                                        inputs, output_tas)
      else:
        (not_all_done, new_indices, new_inputs,
         new_output_tas) = self._process_cond_stacked(conditions, indices,
                                                      inputs, inputs_stacked,
                                                      output_tas)

      # Convert body
      with ops.name_scope("while_body"):
        # Compute the outputs from the body.
        new_outputs = self._process_body(pfor_input, inputs_stacked,
                                         new_indices, cond_stacked, new_inputs,
                                         not_all_done)

      # Note that the first num_outputs new values of inputs are computed using
      # the body. Rest of them were direct Enters into the condition/body and
      # the partitioning done earlier is sufficient to give the new value.
      num_outputs = len(self._outputs)
      new_args = ([not_all_done, new_indices] + new_outputs +
                  list(new_inputs[num_outputs:]) + new_output_tas)
      return tuple(new_args)

    while_outputs = while_loop.while_loop(
        cond, body, init_values, shape_invariants=shape_invariants)
    # The TensorArrays are the trailing elements of the loop state; see the
    # structure of init_values in _create_init_values.
    output_tas = while_outputs[-len(self._outputs):]
    outputs = []
    assert cond_is_stacked[0] is not None
    for inp_stacked, ta in zip(inputs_stacked, output_tas):
      if cond_is_stacked[0]:
        # Stacked condition: iterations finished at different times and were
        # scattered into per-iteration slots, so stack the full TensorArray.
        outputs.append(wrap(ta.stack(), True))
      else:
        # Note that if while_loop condition is unstacked, all iterations exit at
        # the same time and we wrote those outputs in index 0 of the tensor
        # array.
        outputs.append(wrap(ta.read(0), inp_stacked))
    return outputs
| WhileOp |
python | getsentry__sentry | src/sentry/taskworker/client/processing_result.py | {
"start": 121,
"end": 308
} | class ____:
"""Result structure from child processess to parent"""
task_id: str
status: TaskActivationStatus.ValueType
host: str
receive_timestamp: float
| ProcessingResult |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/mlengine.py | {
"start": 1705,
"end": 1921
} | class ____(BaseGoogleLink):
"""Helper class for constructing ML Engine link."""
name = "MLEngine Models List"
key = "ml_engine_models_list"
format_str = MLENGINE_MODELS_LIST_LINK
| MLEngineModelsListLink |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.