| column | dtype | values / lengths |
| --- | --- | --- |
| language | stringclasses | 1 value |
| repo | stringclasses | 346 values |
| path | stringlengths | 6–201 |
| class_span | dict | |
| source | stringlengths | 21–2.38M |
| target | stringlengths | 1–96 |
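
Read as a record, each row pairs a masked class definition (`source`, with the class name replaced by `____`) against the name to recover (`target`). The sketch below shows how the fields fit together, assuming `class_span` holds the character offsets of the class within the original file; the abbreviated `source` value and the helper code are illustrative only, not part of the dataset.

```python
# A minimal sketch of one record, using values from the first example below
# (urllib3); the "source" text is abbreviated here.
record = {
    "language": "python",
    "repo": "urllib3__urllib3",
    "path": "src/urllib3/exceptions.py",
    # Assumption: start/end are character offsets of the class in the original file.
    "class_span": {"start": 1931, "end": 2161},
    "source": 'class ____(HTTPError):\n'
              '    """Raised when something unexpected happens mid-request/response."""\n',
    "target": "ProtocolError",
}

# "____" marks the masked class name, so splicing the target back in
# reconstructs the original definition.
restored = record["source"].replace("____", record["target"], 1)
assert restored.startswith("class ProtocolError(HTTPError):")
```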

language: python
repo: urllib3__urllib3
path: src/urllib3/exceptions.py
class_span: { "start": 1931, "end": 2161 }
source:

```python
class ____(HTTPError):
    """Raised when something unexpected happens mid-request/response."""


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError


# Leaf Exceptions
```

target: ProtocolError

language: python
repo: dagster-io__dagster
path: python_modules/libraries/dagster-sling/dagster_sling/sling_event_iterator.py
class_span: { "start": 6042, "end": 8460 }
source:

```python
class ____(Iterator[T]):
    """A wrapper around an iterator of Sling events which contains additional methods for
    post-processing the events, such as fetching column metadata.
    """

    def __init__(
        self,
        events: Iterator[T],
        sling_cli: "SlingResource",
        replication_config: dict[str, Any],
        context: Union[OpExecutionContext, AssetExecutionContext],
    ) -> None:
        self._inner_iterator = events
        self._sling_cli = sling_cli
        self._replication_config = replication_config
        self._context = context

    def __next__(self) -> T:
        return next(self._inner_iterator)

    def __iter__(self) -> "SlingEventIterator[T]":
        return self

    @public
    def fetch_column_metadata(self) -> "SlingEventIterator":
        """Fetches column metadata for each table synced by the Sling CLI.

        Retrieves the column schema and lineage for each target table.

        Returns:
            SlingEventIterator: An iterator of Dagster events with column metadata attached.
        """

        def _fetch_column_metadata() -> Iterator[T]:
            for event in self:
                col_metadata = fetch_column_metadata(
                    event, self._sling_cli, self._replication_config, self._context
                )
                if event.metadata:
                    yield event._replace(metadata={**col_metadata, **event.metadata})

        return SlingEventIterator[T](
            _fetch_column_metadata(), self._sling_cli, self._replication_config, self._context
        )

    @public
    def fetch_row_count(self) -> "SlingEventIterator":
        """Fetches row count metadata for each table synced by the Sling CLI.

        Retrieves the row count for each target table.

        Returns:
            SlingEventIterator: An iterator of Dagster events with row count metadata attached.
        """

        def _fetch_row_count() -> Iterator[T]:
            for event in self:
                row_count_metadata = fetch_row_count_metadata(
                    event, self._sling_cli, self._replication_config, self._context
                )
                if event.metadata:
                    yield event._replace(metadata={**row_count_metadata, **event.metadata})

        return SlingEventIterator[T](
            _fetch_row_count(), self._sling_cli, self._replication_config, self._context
        )
```

target: SlingEventIterator

language: python
repo: kubernetes-client__python
path: kubernetes/client/models/apiextensions_v1_service_reference.py
class_span: { "start": 383, "end": 6646 }
source:
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'namespace': 'str', 'path': 'str', 'port': 'int' } attribute_map = { 'name': 'name', 'namespace': 'namespace', 'path': 'path', 'port': 'port' } def __init__(self, name=None, namespace=None, path=None, port=None, local_vars_configuration=None): # noqa: E501 """ApiextensionsV1ServiceReference - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._namespace = None self._path = None self._port = None self.discriminator = None self.name = name self.namespace = namespace if path is not None: self.path = path if port is not None: self.port = port @property def name(self): """Gets the name of this ApiextensionsV1ServiceReference. # noqa: E501 name is the name of the service. Required # noqa: E501 :return: The name of this ApiextensionsV1ServiceReference. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ApiextensionsV1ServiceReference. name is the name of the service. Required # noqa: E501 :param name: The name of this ApiextensionsV1ServiceReference. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def namespace(self): """Gets the namespace of this ApiextensionsV1ServiceReference. # noqa: E501 namespace is the namespace of the service. Required # noqa: E501 :return: The namespace of this ApiextensionsV1ServiceReference. # noqa: E501 :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this ApiextensionsV1ServiceReference. namespace is the namespace of the service. Required # noqa: E501 :param namespace: The namespace of this ApiextensionsV1ServiceReference. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501 raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501 self._namespace = namespace @property def path(self): """Gets the path of this ApiextensionsV1ServiceReference. # noqa: E501 path is an optional URL path at which the webhook will be contacted. # noqa: E501 :return: The path of this ApiextensionsV1ServiceReference. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this ApiextensionsV1ServiceReference. path is an optional URL path at which the webhook will be contacted. # noqa: E501 :param path: The path of this ApiextensionsV1ServiceReference. # noqa: E501 :type: str """ self._path = path @property def port(self): """Gets the port of this ApiextensionsV1ServiceReference. # noqa: E501 port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. # noqa: E501 :return: The port of this ApiextensionsV1ServiceReference. 
# noqa: E501 :rtype: int """ return self._port @port.setter def port(self, port): """Sets the port of this ApiextensionsV1ServiceReference. port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility. # noqa: E501 :param port: The port of this ApiextensionsV1ServiceReference. # noqa: E501 :type: int """ self._port = port def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ApiextensionsV1ServiceReference): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ApiextensionsV1ServiceReference): return True return self.to_dict() != other.to_dict()
target: ApiextensionsV1ServiceReference

language: python
repo: django__django
path: tests/admin_views/models.py
class_span: { "start": 16576, "end": 16761 }
source:

```python
class ____(Pizza):
    class Meta:
        proxy = True


# No default permissions are created for this model and both name and toppings
# are readonly for this model's admin.
```

target: ReadablePizza

language: python
repo: dagster-io__dagster
path: docs/sphinx/_ext/sphinx-click/tests/test_formatter.py
class_span: { "start": 23736, "end": 25371 }
source:
class ____(unittest.TestCase): """Validate ``click.CommandCollection`` instances.""" maxDiff = None def test_basics(self): """Validate a ``click.CommandCollection`` with grouped outputs.""" @click.group() def grp1(): """A first group.""" pass @grp1.command() def hello(): """A hello command.""" @click.group() def grp2(): """A second group.""" pass @grp2.command() def world(): """A world command.""" cli = click.CommandCollection( name="cli", sources=[grp1, grp2], help="A simple CommandCollection." ) ctx = click.Context(cli, info_name="cli") output = list(ext._format_command(ctx, nested="full")) # noqa self.assertEqual( textwrap.dedent( """ A simple CommandCollection. .. program:: cli .. code-block:: shell cli [OPTIONS] COMMAND [ARGS]... """ ).lstrip(), "\n".join(output), ) output = list(ext._format_command(ctx, nested="short")) # noqa self.assertEqual( textwrap.dedent( """ A simple CommandCollection. .. program:: cli .. code-block:: shell cli [OPTIONS] COMMAND [ARGS]... .. rubric:: Commands .. object:: hello A hello command. .. object:: world A world command. """ ).lstrip(), "\n".join(output), )
target: CommandCollectionTestCase

language: python
repo: microsoft__pyright
path: packages/pyright-internal/src/tests/samples/tuple8.py
class_span: { "start": 154, "end": 580 }
source:

```python
class ____(tuple[int, str, int, _T]):
    def __new__(cls) -> Self: ...


objA = ClassA[complex]()

(a, b, c, d) = objA
aa1: int = a
bb1: str = b
cc1: int = c
dd1: complex = d

aa2: int = objA[0]
bb2: str = objA[1]
cc2: int = objA[2]
dd2: complex = objA[3]

# These should generate errors because
# these are not the correct types.
aa3: str = a
bb3: complex = b
cc3: str = c
dd3: int = d

for aaa in objA:
    print(aaa)
```

target: ClassA

language: python
repo: pytorch__pytorch
path: test/test_custom_ops.py
class_span: { "start": 17800, "end": 84762 }
source:
class ____(CustomOpTestCaseBase): test_ns = "_test_custom_op" @requires_compile def test_functionalize_error(self): with torch.library._scoped_library(self.test_ns, "FRAGMENT") as lib: lib.define("foo(Tensor(a!) x) -> Tensor(a!)") def foo(x): return x.sin_() lib.impl("foo", foo, "CompositeExplicitAutograd") foo_op = self.get_op(f"{self.test_ns}::foo") lib.define("bar(Tensor(a) x) -> Tensor(a)") def bar(x): return x.view(-1) lib.impl("bar", bar, "CompositeExplicitAutograd") bar_op = self.get_op(f"{self.test_ns}::bar") msg = r".*We only support functionalizing operators whose outputs do not have alias annotations" x = torch.randn(3) @torch.compile(backend="aot_eager", fullgraph=True) def f(x): return foo_op(x) @torch.compile(backend="aot_eager", fullgraph=True) def g(x): return bar_op(x) with self.assertRaisesRegex(RuntimeError, msg): f(x) with self.assertRaisesRegex(RuntimeError, msg): g(x) def test_invalid_schemas(self): # function schema validation goes through torchgen, so this is just a # basic test. with self.assertRaisesRegex(AssertionError, "Invalid function schema: foo"): custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo", "(") def test_invalid_qualname(self): with self.assertRaisesRegex(ValueError, "overload"): custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo.Tensor", "() -> ()") def test_name_must_match(self): with self.assertRaisesRegex(ValueError, "to have name"): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def baz(x: Tensor) -> Tensor: raise NotImplementedError def test_unsupported_schemas(self): with self.assertRaisesRegex(ValueError, "only supports functional"): custom_ops.custom_op( f"{TestCustomOp.test_ns}::foo", "(Tensor(a!) x) -> Tensor(a)" )(foo) with self.assertRaisesRegex(ValueError, "only supports functional"): custom_ops.custom_op( f"{TestCustomOp.test_ns}::foo", "(Tensor(a) x) -> Tensor(a)" )(foo) with self.assertRaisesRegex(ValueError, "only supports functional"): custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo", "(Tensor x) -> ()")( foo ) with self.assertRaisesRegex(ValueError, "self"): custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo", "(Tensor self) -> ()")( foo ) # Tests for the older custom_op API def test_schema_matches_signature(self): with self.assertRaisesRegex(ValueError, "signature to match"): @custom_op(f"{TestCustomOp.test_ns}::blah", "(Tensor y) -> Tensor") def blah(x): pass with self.assertRaisesRegex(ValueError, "signature to match"): @custom_op( f"{TestCustomOp.test_ns}::blah2", "(Tensor x, *, Tensor y) -> Tensor" ) def blah2(x, y): pass with self.assertRaisesRegex(ValueError, "signature to match"): @custom_op( f"{TestCustomOp.test_ns}::blah3", "(Tensor x, *, Tensor w, Tensor z) -> Tensor", ) def blah3(x, *, y, z): pass with self.assertRaisesRegex(ValueError, "signature to match"): @custom_op( f"{TestCustomOp.test_ns}::blah4", "(Tensor x, *, Tensor z, Tensor y) -> Tensor", ) def blah4(x, *, y, z): pass with self.assertRaisesRegex(ValueError, "not supported"): @custom_op(f"{TestCustomOp.test_ns}::blah5", "(Tensor x) -> Tensor") def blah5(*args): pass with self.assertRaisesRegex(ValueError, "not supported"): @custom_op( f"{TestCustomOp.test_ns}::blah6", "(*, Tensor z, Tensor y) -> Tensor" ) def blah6(**kwargs): pass with self.assertRaisesRegex(ValueError, "default arguments"): @custom_op( f"{TestCustomOp.test_ns}::blah7", "(Tensor x, *, Tensor y) -> Tensor" ) def blah7(x=1, *, y): pass with self.assertRaisesRegex(ValueError, "default arguments"): @custom_op( f"{TestCustomOp.test_ns}::blah8", "(Tensor x, *, Tensor y) -> Tensor" ) def 
blah8(x, *, y=1): pass # kwonly-arg works @custom_op( f"{TestCustomOp.test_ns}::blah9", "(Tensor x, *, Tensor y) -> Tensor" ) def blah9(x, *, y): pass def test_infer_schema_no_return(self): with self.assertRaisesRegex( ValueError, "No return type annotation was provided. Please add one." ): @torch.library.custom_op("mylib::foo", mutates_args={}) def foo(x: torch.Tensor, y: int): return x * y def test_infer_schema_supported(self): def a(x: Tensor) -> Tensor: return torch.empty([]) self.assertExpectedInline( infer_schema(a, mutates_args=()), """(Tensor x) -> Tensor""" ) def kwonly1(x: Tensor, *, y: int, z: float) -> Tensor: return torch.empty([]) self.assertExpectedInline( infer_schema(kwonly1, mutates_args=()), """(Tensor x, *, SymInt y, float z) -> Tensor""", ) def kwonly2(*, y: Tensor) -> Tensor: return torch.empty([]) self.assertExpectedInline( infer_schema(kwonly2, mutates_args=()), """(*, Tensor y) -> Tensor""" ) def b( x: Tensor, y: int, z: bool, a: float, b: torch.dtype, c: torch.device, d: torch.types.Number, ) -> Tuple[Tensor, int, float, bool]: return torch.empty([]), 1, 0.1, True self.assertExpectedInline( infer_schema(b, mutates_args=()), """(Tensor x, SymInt y, bool z, float a, ScalarType b, Device c, Scalar d) -> (Tensor, SymInt, float, bool)""", ) def c( x: Tensor, y: Sequence[Tensor], z: Optional[Tensor], w: Sequence[Optional[Tensor]], ) -> List[Tensor]: return [torch.empty([])] self.assertExpectedInline( infer_schema(c, mutates_args=()), """(Tensor x, Tensor[] y, Tensor? z, Tensor?[] w) -> Tensor[]""", ) def d(x: Tensor) -> Tuple[List[Tensor], Tensor]: return [torch.empty([])], torch.empty([]) self.assertExpectedInline( infer_schema(d, mutates_args=()), """(Tensor x) -> (Tensor[], Tensor)""" ) def e() -> Tensor: return torch.empty([]) self.assertExpectedInline(infer_schema(e, mutates_args=()), """() -> Tensor""") def f(x: Tensor) -> None: pass self.assertExpectedInline( infer_schema(f, mutates_args=()), """(Tensor x) -> ()""" ) def g( x: Tensor, y: List[Tensor], z: List[Tensor], w: List[Optional[Tensor]] ) -> None: pass self.assertExpectedInline( infer_schema(g, mutates_args=()), """(Tensor x, Tensor[] y, Tensor[] z, Tensor?[] w) -> ()""", ) self.assertExpectedInline( infer_schema(g, mutates_args={"x", "w", "z"}), """(Tensor(a0!) x, Tensor[] y, Tensor(a2!)[] z, Tensor(a3!)?[] w) -> ()""", ) self.assertExpectedInline( infer_schema(g, mutates_args="unknown"), """(Tensor(a0!) x, Tensor(a1!)[] y, Tensor(a2!)[] z, Tensor(a3!)?[] w) -> ()""", ) def h( x: Tensor, a: Optional[int] = None, b: float = 3.14, c: bool = True, d: int = 3, e: str = "foo", f: torch.dtype = torch.float, g: torch.dtype = torch.float32, h: torch.dtype = torch.int, i: torch.device = torch.device("cpu:0"), j: torch.device = "cpu", ) -> None: pass self.assertExpectedInline( infer_schema(h, mutates_args=()), ( """(Tensor x, SymInt? a=None, float b=3.14, bool c=True, SymInt d=3, str e="foo", """ """ScalarType f=float32, ScalarType g=float32, ScalarType h=int32, Device i="cpu:0", Device j="cpu") -> ()""" ), ) def foo_impl(x: torch.Tensor) -> torch.Tensor: return x.sin() schema = torch.library.infer_schema(foo_impl, op_name="myop", mutates_args={}) self.assertExpectedInline(schema, "myop(Tensor x) -> Tensor") # Ensure that a global in this file is properly found & evaluated. 
def stringy_fn(x: torch.Tensor) -> "MyList[torch.Tensor]": return [torch.randn_like(x)] schema = infer_schema(stringy_fn, mutates_args={}) self.assertExpectedInline(schema, "(Tensor x) -> Tensor[]") # Make sure that substrings are evaluated properly. def substringy_fn( x: torch.Tensor, ) -> list["MyTensor"]: return [torch.randn_like(x)] schema = infer_schema(substringy_fn, mutates_args={}) self.assertExpectedInline(schema, "(Tensor x) -> Tensor[]") def test_infer_schema_unsupported(self): with self.assertRaisesRegex(ValueError, "varargs"): def foo(*args): raise NotImplementedError infer_schema(foo, mutates_args=()) with self.assertRaisesRegex(ValueError, "varkwargs"): def foo(**kwargs): raise NotImplementedError infer_schema(foo, mutates_args=()) with self.assertRaisesRegex(ValueError, "must have a type annotation"): def foo(x): raise NotImplementedError infer_schema(foo, mutates_args=()) with self.assertRaisesRegex(ValueError, "unsupported"): def foo(x: Tensor) -> Tuple[Tensor, ...]: raise NotImplementedError infer_schema(foo, mutates_args=()) with self.assertRaisesRegex(ValueError, "can be mutated"): def foo(x: Tensor, y: int) -> Tensor: raise NotImplementedError infer_schema(foo, mutates_args={"y"}) # Ensure that a global defined in infer_schema's file ISN'T found. with self.assertRaisesRegex( ValueError, r"Unsupported type annotation list\[_TestTensor\]\. It is not a type\.", ): def stringy_bad_type( x: torch.Tensor, ) -> "list[_TestTensor]": return [torch.randn_like(x)] self.assertTrue(hasattr(torch._library.infer_schema, "_TestTensor")) schema = infer_schema(stringy_bad_type, mutates_args={}) def _generate_examples(self, typ): if typ is int: return [17] if typ is float: return [3.14] if typ is bool: return [True] if typ is str: return ["foo"] if typ is torch.dtype: return [torch.float32] if typ is torch.device: return [torch.device("cpu")] if typ == torch.types.Number: return [2.718] if typ is torch.Tensor: return [torch.tensor(3)] if typ == Optional[torch.types.Number]: return [None, 2.718] if typ == OpaqueType: return [make_opaque("moo")] origin = typing.get_origin(typ) if origin is Union: args = typing.get_args(typ) assert len(args) == 2 and (args[0] is type(None) or args[1] is type(None)) elt = args[0] if args[1] is type(None) else args[1] return self._generate_examples(elt) + [None] if origin is list: args = typing.get_args(typ) assert len(args) == 1 elt = args[0] return [ self._generate_examples(elt), self._generate_examples(elt), self._generate_examples(elt), ] if origin is collections.abc.Sequence: args = typing.get_args(typ) assert len(args) == 1 examples = self._generate_examples(args[0]) return list(itertools.product(examples, examples)) + [] raise NotImplementedError( f"testrunner cannot generate instanstance of type {typ}" ) def test_supported_return_types_single_return(self): for typ in torch._library.infer_schema.SUPPORTED_RETURN_TYPES: for example in self._generate_examples(typ): try: @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> typ: raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x: Tensor) -> typ: return example op = self.get_op(f"{self.test_ns}::foo") result = op(torch.randn([])) self.assertEqual(result, example, msg=f"{typ} {example}") finally: custom_ops._destroy(f"{self.test_ns}::foo") def test_supported_return_types_multi_return(self): for typ in torch._library.infer_schema.SUPPORTED_RETURN_TYPES: for example in self._generate_examples(typ): try: @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: 
Tensor) -> Tuple[typ, typ]: raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x: Tensor) -> Tuple[typ, typ]: return (example, example) op = self.get_op(f"{self.test_ns}::foo") result = op(torch.randn([])) expected = (example, example) self.assertEqual(result, expected, msg=f"{typ} {example}") finally: custom_ops._destroy(f"{self.test_ns}::foo") def test_supported_param_types(self): for typ in torch._library.infer_schema.SUPPORTED_PARAM_TYPES: @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: typ) -> Tensor: raise NotImplementedError yeet = None @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types=["cpu"]) def foo_cpu(x, y): nonlocal yeet yeet = y return x.clone() try: for example in self._generate_examples(typ): op = self.get_op(f"{self.test_ns}::foo") op(torch.randn([]), example) self.assertEqual(yeet, example, msg=f"{typ} {example}") yeet = None finally: custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") def test_sequences(self): # Sequence[int] gets automagically turned into int[] in the schema. # This test checks that we actually do support arbitrary sequence types. class MySequence(collections.abc.Sequence): def __init__(self) -> None: self._container = [1, 2, 3] def __getitem__(self, idx): return self._container[idx] def __len__(self): return len(self._container) @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: torch.Tensor, sizes: Sequence[int]) -> torch.Tensor: raise NotImplementedError called = 0 @custom_ops.impl(f"{self.test_ns}::foo", device_types="cpu") def foo_cpu(x, sizes): nonlocal called called += 1 # Dispatcher will normalize the sequence type into a List self.assertEqual(sizes, [1, 2, 3]) return x.clone() x = torch.randn([]) seq = MySequence() op = self.get_op(f"{self.test_ns}::foo") op(x, seq) self.assertEqual(called, 1) def test_unsupported_param_types(self): # Not comprehensive (it doesn't need to be), just a check that our mechanism works with self.assertRaisesRegex(ValueError, "unsupported type"): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: List[Optional[int]]) -> Tensor: raise NotImplementedError del foo with self.assertRaisesRegex(ValueError, "unsupported type"): # int[N] in Dispatcher is a bit wild, so we don't try to support it. @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: Tuple[int, int]) -> Tensor: raise NotImplementedError del foo with self.assertRaisesRegex(ValueError, r"For example, list\[int\]"): # test that we propose a correct and supported type. 
@torch.library.custom_op(f"{TestCustomOp.test_ns}::foo", mutates_args={}) def foo(x: Tensor, y: Tuple[int, int]) -> Tensor: raise NotImplementedError del foo with self.assertRaises(ValueError) as cm: @torch.library.custom_op(f"{TestCustomOp.test_ns}::foo", mutates_args={}) def foo(x: Tensor, y: Tuple[int, float]) -> Tensor: raise NotImplementedError del foo self.assertNotIn("example", str(cm.exception), "") with self.assertRaisesRegex(ValueError, "unsupported type"): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: Callable) -> Tensor: raise NotImplementedError del foo # Define a named tuple for a Point with x and y coordinates Point = collections.namedtuple("Point", ["x", "y"]) with self.assertRaisesRegex(ValueError, "unsupported type"): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: Point) -> Tensor: raise NotImplementedError del foo def test_supported_schemas(self): # All of these should already be tested by PyTorch codegen # (we share the same mechanism), but here's a sanity check. schemas = [ "(Tensor x) -> Tensor", "(Tensor x) -> Tensor y", "(Tensor[] x) -> Tensor y", "(Tensor x) -> (Tensor, Tensor)", "(Tensor x) -> (Tensor y, Tensor z)", "(Tensor x) -> (Tensor y, Tensor z)", ] other_schemas = [ "(Tensor x, Tensor w) -> (Tensor y, Tensor z)", "(Tensor x, Tensor w) -> (Tensor, Tensor)", "(Tensor x, Tensor w) -> Tensor", "(Tensor? x, Tensor w) -> Tensor", "(Tensor? x, Tensor[] w) -> Tensor", "(Tensor x, int[] w) -> Tensor", "(Tensor x, SymInt[] w) -> Tensor", "(Tensor x, Scalar w) -> Tensor", "(Tensor x, float w) -> Tensor", "(Tensor x, float? w) -> Tensor", "(Tensor x, bool[] w) -> Tensor", ] for schema in schemas: custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo", schema) custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") for schema in other_schemas: custom_ops.custom_op(f"{TestCustomOp.test_ns}::bar", schema) custom_ops._destroy(f"{TestCustomOp.test_ns}::bar") def test_reserved_ns(self): from torch._custom_op.impl import RESERVED_NS for ns in RESERVED_NS: with self.assertRaisesRegex(ValueError, "is a reserved namespace"): custom_ops.custom_op(f"{ns}::foo", "(Tensor x) -> Tensor") with self.assertRaisesRegex(ValueError, "is a reserved namespace"): @custom_ops.custom_op(f"{ns}::foo2") def foo2(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError def test_private_ctor(self): with self.assertRaisesRegex(RuntimeError, "CustomOp constructor is private"): CustomOp(None, None, None, None, None) def test_lifetime(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError custom_op = torch._custom_op.impl.get_op(f"{TestCustomOp.test_ns}::foo") # We can't define an op multiple times, with self.assertRaisesRegex(RuntimeError, "multiple times"): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 raise NotImplementedError # Unless we delete the original op. 
custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") # Smoke test @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 raise NotImplementedError custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") def test_autograd_notimplemented(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 raise NotImplementedError x = torch.randn(3, requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") with self.assertRaisesRegex(RuntimeError, "Autograd has not been implemented"): op(x) custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") del foo @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Sequence[torch.Tensor]) -> torch.Tensor: raise NotImplementedError x = torch.randn(3, requires_grad=True) y = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") with self.assertRaisesRegex(RuntimeError, "Autograd has not been implemented"): op([y, x]) custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") del foo @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: raise NotImplementedError x = torch.randn(3, requires_grad=True) y = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") with self.assertRaisesRegex(RuntimeError, "Autograd has not been implemented"): op(y, x) custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") def test_autograd_notimplemented_gradmode(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): return x * y x = torch.randn(3, requires_grad=True) y = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") with torch.no_grad(): # Shouldn't raise, because we are in no_grad op(y, x) def test_impl_cpu(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu") def foo_cpu(x): return x.sin() x = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") result = op(x) self.assertEqual(result, foo_cpu(x)) def test_impl_invalid_devices(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError def foo_impl(x): return x.sin() from torch._custom_op.impl import SUPPORTED_DEVICE_TYPE_TO_KEY for device_type in SUPPORTED_DEVICE_TYPE_TO_KEY: # Smoke test: should not raise error custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types=device_type)( foo_impl ) # Not supported by this API: we can either support them in the future # or provide some other CustomOp.def_* function. This depends on how # common the use cases are. 
for invalid_type in ["hip", "xla", "mkldnn", ["cpu", "hip"]]: with self.assertRaisesRegex(ValueError, "we only support device_type"): custom_ops.impl( f"{TestCustomOp.test_ns}::foo", device_types=invalid_type )(foo_impl) def test_backward_partially_registered(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.sin() @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return grad * saved.cos() x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") with self.assertRaisesRegex( RuntimeError, "unable to find a 'save_for_backward'" ): y = op(x) y.backward() def test_save_for_backward_inputs_are_namedtuple(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.sin() hit = 0 @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): nonlocal hit hit += 1 self.assertTrue(isinstance(inputs, tuple)) self.assertEqual(list(inputs._asdict().keys()), ["x"]) return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": grad * saved.cos()} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x) self.assertEqual(hit, 1) y.backward() self.assertEqual(hit, 1) def test_backward_returns_dict(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return grad * saved.cos() x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x) with self.assertRaisesRegex(RuntimeError, "to be a dict"): y.backward() def test_backward_dict_invalid_keys(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": grad * saved.cos(), "y": None} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x) with self.assertRaisesRegex(RuntimeError, "to have keys {'x'}"): y.backward() def test_backward_dict_grad_for_nontensor(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, dim): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": grad * saved.cos(), "dim": None} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x, 32) with 
self.assertRaisesRegex(RuntimeError, "non-Tensor-like types"): y.backward() def test_backward_dict_requires_keys_for_input_tensors(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": grad * saved.cos()} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x, x) with self.assertRaisesRegex(RuntimeError, r"to have keys {.*'y'.*}"): y.backward() def test_backward_dict_requires_keys_for_input_optional_tensors(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: Optional[torch.Tensor]) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": grad * saved.cos()} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x, None) with self.assertRaisesRegex(RuntimeError, r"to have keys {.*'y'.*}"): y.backward() def test_backward_grads_are_tensor_or_none(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"x": (grad * saved.cos(),)} x = torch.randn([], requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") y = op(x) with self.assertRaisesRegex(RuntimeError, "either None or a Tensor"): y.backward() def test_backward_tensorlist_input_requires_list_grads_with_same_numel(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): return xs[0].sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.xs[0] @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"xs": [grad * saved.cos(), None]} xs = [torch.randn([], requires_grad=True) for _ in range(3)] op = self.get_op(f"{self.test_ns}::foo") y = op(xs) with self.assertRaisesRegex(RuntimeError, "3 gradients but got 2"): y.backward() def test_backward_tensorlist_input_requires_list_grads_none_or_Tensor(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): return xs[0].sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.xs[0] @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"xs": [grad * saved.cos(), None, (None,)]} xs = 
[torch.randn([], requires_grad=True) for _ in range(3)] op = self.get_op(f"{self.test_ns}::foo") y = op(xs) with self.assertRaisesRegex(RuntimeError, "None or Tensor"): y.backward() def test_backward_tensorlist_input_requires_list_grads(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): return xs[0].sin() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return inputs.xs[0] @custom_ops.impl_backward(f"{TestCustomOp.test_ns}::foo") def foo_backward(ctx, saved, grad): return {"xs": None} xs = [torch.randn([], requires_grad=True) for _ in range(3)] op = self.get_op(f"{self.test_ns}::foo") y = op(xs) with self.assertRaisesRegex(RuntimeError, "list of gradients"): y.backward() def test_backward_output_differentiability_type(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: raise NotImplementedError with self.assertRaisesRegex(RuntimeError, "output_differentiability"): @custom_ops.impl_backward( f"{TestCustomOp.test_ns}::foo", output_differentiability=True ) def foo_backward(ctx, saved, grad): return {"xs": None} def test_backward_output_differentiability_numel(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError with self.assertRaisesRegex(RuntimeError, "output_differentiability"): @custom_ops.impl_backward( f"{TestCustomOp.test_ns}::foo", output_differentiability=[True] ) def foo_backward(ctx, saved, grad): return {"xs": None} def test_backward_output_differentiability_tensorlist(self): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> Tuple[List[Tensor], Tensor]: raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x): return [x.clone(), x.clone()], x.clone() @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return [] @custom_ops.impl_backward( f"{TestCustomOp.test_ns}::foo", output_differentiability=[False, True] ) def foo_backward(ctx, saved, grad_lst, grad): return {"x": grad} op = self.get_op(f"{self.test_ns}::foo") x = torch.randn(3, requires_grad=True) [a, b], c = op(x) self.assertFalse(a.requires_grad) self.assertFalse(b.requires_grad) self.assertTrue(c.requires_grad) def test_backward_output_differentiability_non_tensor(self): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> Tuple[Tensor, int]: raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x): return x.clone(), 3 @custom_ops.impl_save_for_backward(f"{TestCustomOp.test_ns}::foo") def foo_save_for_backward(inputs, output): return [] @custom_ops.impl_backward( f"{TestCustomOp.test_ns}::foo", output_differentiability=[True, True] ) def foo_backward(ctx, saved, grad0, grad1): return {"x": grad0} op = self.get_op(f"{self.test_ns}::foo") x = torch.randn(3, requires_grad=True) with self.assertRaisesRegex(RuntimeError, "is not a Tensor"): op(x) @unittest.skipIf(not TEST_CUDA, "requires CUDA") def test_impl_separate(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu") def foo_cpu(x): return x.sin() @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cuda") 
def foo_cuda(x): return x.cos() x = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") result = op(x) self.assertEqual(result, foo_cpu(x)) x_cuda = x.cuda() op = self.get_op(f"{self.test_ns}::foo") result = op(x_cuda) self.assertEqual(result, foo_cuda(x_cuda)) @unittest.skipIf(not TEST_CUDA, "requires CUDA") def test_impl_multiple(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): return x.cos() op = self.get_op(f"{self.test_ns}::foo") x = torch.randn(3) result = op(x) self.assertEqual(result, foo_impl(x)) x_cuda = x.cuda() result = op(x_cuda) self.assertEqual(result, foo_impl(x_cuda)) def test_impl_abstract_overload(self): lib = self.lib() lib.define("sin.blah(Tensor x) -> Tensor") torch.library.register_fake( f"{self.test_ns}::sin.blah", torch.empty_like, lib=lib ) op = self.ns().sin.blah x = torch.randn(3, device="meta") op(x) def test_impl_meta(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: raise NotImplementedError @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x, dim): output_shape = list(x.shape) del output_shape[dim] return x.new_empty(output_shape) x = torch.randn(2, 3, device="meta") op = self.get_op(f"{self.test_ns}::foo") result = op(x, 1) self.assertEqual(result.shape, foo_meta(x, 1).shape) def test_duplicate_impl(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: raise NotImplementedError @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x, dim): output_shape = list(x.shape) del output_shape[dim] return x.new_empty(output_shape) with self.assertRaisesRegex(RuntimeError, r"test_custom_ops.py:\d+"): @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta2(x, dim): output_shape = list(x.shape) del output_shape[dim] return x.new_empty(output_shape) def test_new_data_dependent_symint(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x): ctx = torch.library.get_ctx() r = ctx.new_dynamic_size(min=1) with self.assertRaisesRegex(ValueError, "greater than or equal to 0"): ctx.new_dynamic_size(min=-1) with self.assertRaisesRegex(ValueError, "SymInt"): ctx.new_dynamic_size(max=x.numel()) # NB: You must return dynamic sizes! return x.new_empty(r) x = torch.randn(2, 3, device="cpu") op = self.get_op(f"{self.test_ns}::foo") make_fx(op, tracing_mode="symbolic")(x) def test_meta_for_data_dependent_shape_operation(self): x = torch.randn(10, device="meta") with self.assertRaisesRegex(RuntimeError, "data-dependent shape"): numpy_nonzero(x) def test_basic_make_fx(self): # More serious tests are in our CustomOp opinfo db, # this one is just a sanity check. 
@custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError @torch.library.register_fake(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x): return x.sum() x = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") gm = make_fx(op, tracing_mode="symbolic")(x) self.assertTrue(f"{TestCustomOp.test_ns}.foo" in gm.code) def test_not_implemented_error(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: raise NotImplementedError x = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") with self.assertRaisesRegex(NotImplementedError, "cpu impl registered"): op(x) x = torch.randn(3, device="meta") with self.assertRaisesRegex(NotImplementedError, "no fake impl or Meta kernel"): op(x) @custom_ops.custom_op(f"{TestCustomOp.test_ns}::bar") def bar(sizes: Sequence[int]) -> torch.Tensor: raise NotImplementedError op = self.get_op(f"{self.test_ns}::bar") with self.assertRaisesRegex(NotImplementedError, "no Tensor inputs"): op((1, 2, 3)) def test_data_dependent_basic(self): x = torch.randn(5, 5) gm = make_fx(numpy_nonzero, tracing_mode="symbolic")(x) self.assertTrue("nonzero" in gm.code) def test_data_dependent_fake_tracing(self): x = torch.randn(5, 5) # We've updated to attempt to use unbacked symints even for fake # tracing make_fx(numpy_nonzero, tracing_mode="fake")(x) def test_symints(self): def f(x): return torch.ops._torch_testing.numpy_view_copy(x, x.shape) x = torch.randn(2, 3, 4) gm = make_fx(f, tracing_mode="symbolic")(x) result = gm(x) self.assertEqual(result, f(x)) self.assertExpectedInline( gm.code.strip(), """\ def forward(self, x_1): sym_size_int = torch.ops.aten.sym_size.int(x_1, 0) sym_size_int_1 = torch.ops.aten.sym_size.int(x_1, 1) sym_size_int_2 = torch.ops.aten.sym_size.int(x_1, 2) numpy_view_copy = torch.ops._torch_testing.numpy_view_copy.default(x_1, [sym_size_int, sym_size_int_1, sym_size_int_2]); x_1 = sym_size_int = sym_size_int_1 = sym_size_int_2 = None return numpy_view_copy""", # noqa: B950 ) @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work on windows") def test_data_dependent_compile(self): import torch._dynamo.testing from torch._dynamo.utils import counters counters.clear() cnt = torch._dynamo.testing.CompileCounter() @torch.compile(backend=cnt) def f(x): return numpy_nonzero(x.clone()).clone() f(torch.randn(10)) self.assertEqual(len(counters["graph_break"]), 1) self.assertEqual(next(iter(counters["graph_break"].values())), 1) self.assertExpectedInline( next(iter(counters["graph_break"].keys())).replace(";", "\n"), """\ Dynamic shape operator Explanation: Operator `_torch_testing.numpy_nonzero.default`'s output shape depends on input Tensor data. Hint: Enable tracing of dynamic shape operators with `torch._dynamo.config.capture_dynamic_output_shape_ops = True` Developer debug context: _torch_testing.numpy_nonzero.default For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0036.html""", ) # pre-existing problem: torch.compile(dynamic=True) will, by default, # graph break on data-dependent operations. Eventually we'll make it so # that it never graph breaks on data-dependent operations. 
@unittest.expectedFailure def test_data_dependent_nms_dynamic_compile(self): import torch._dynamo.testing from torch._dynamo.utils import counters counters.clear() cnt = torch._dynamo.testing.CompileCounter() @torch.compile(backend=cnt, dynamic=True) def f(x, s, i): return torch.ops._torch_testing.numpy_nms(x.clone(), s, i).clone() f(torch.randn(20, 4), torch.randn(20), 0.1) self.assertEqual(len(counters["graph_break"]), 0) def test_impl_on_existing_op(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" @torch._custom_ops.impl(qualname) def foo_impl(x): return x.sin() op = self.get_op(qualname) x = torch.randn(3) result = op(x) self.assertEqual(result, x.sin()) @parametrize( "key", ["CPU", "CUDA", "CompositeImplicitAutograd", "CompositeExplicitAutograd"] ) def test_impl_on_existing_op_with_cpu_registration(self, key): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" def foo_impl(x): return x.sin() lib.impl("foo", foo_impl, key) op = self.get_op(qualname) with self.assertRaisesRegex(RuntimeError, "already has an implementation"): custom_ops.impl(qualname, func=foo_impl) def test_abstract_impl_on_existing_op(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" @torch.library.register_fake(qualname, lib=self.lib()) def foo_impl(x): return x.sin() op = self.get_op(qualname) with torch._subclasses.FakeTensorMode(): x = torch.randn(3) result = op(x) self.assertEqual(result.shape, x.shape) self.assertEqual(result.stride(), x.stride()) def test_abstract_impl_on_existing_op_with_meta(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" def foo_impl(x): return x.sin() lib.impl("foo", foo_impl, "Meta") op = self.get_op(qualname) with self.assertRaisesRegex(RuntimeError, r"already has .*Meta implementation"): torch.library.register_fake(qualname, foo_impl, lib=self.lib()) def test_abstract_impl_on_existing_op_with_CompositeImplicitAutograd(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" def foo_impl(x): return x.sin() lib.impl("foo", foo_impl, "CompositeImplicitAutograd") op = self.get_op(qualname) with self.assertRaisesRegex(RuntimeError, "CompositeImplicitAutograd"): torch.library.register_fake(qualname, foo_impl, lib=self.lib()) def test_abstract_impl_on_existing_op_with_CompositeExplicitAutograd(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" def foo_impl(x): return x.sin() lib.impl("foo", foo_impl, "CompositeExplicitAutograd") op = self.get_op(qualname) torch.library.register_fake(qualname, lambda x: x.sum(), lib=self.lib()) with torch._subclasses.FakeTensorMode(): x = torch.randn(10) result = op(x) self.assertEqual(result.shape, ()) def _test_backward_impl_raises(self, qualname, err_regex): with self.assertRaisesRegex(RuntimeError, err_regex): @custom_ops.impl_save_for_backward(qualname) def foo2(x): return with self.assertRaisesRegex(RuntimeError, err_regex): @custom_ops.impl_backward(qualname) def foo3(x): return def test_backward_impl_on_existing_op_incorrect_schema_views(self): lib = self.lib() lib.define("foo(Tensor(a) x) -> Tensor(a)") qualname = f"{self.test_ns}::foo" self._test_backward_impl_raises(qualname, "operator that returns views") def test_backward_impl_on_existing_op_incorrect_schema_mutable(self): lib = self.lib() lib.define("foo(Tensor(a!) 
x) -> Tensor") qualname = f"{self.test_ns}::foo" self._test_backward_impl_raises(qualname, "non-functional") def test_backward_impl_on_existing_op_incorrect_schema_no_output(self): lib = self.lib() lib.define("foo(Tensor x) -> ()") qualname = f"{self.test_ns}::foo" self._test_backward_impl_raises(qualname, "no returns") def test_backward_impl_on_existing_op_CompositeImplicitAutograd(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" lib.impl("foo", lambda x: x.sin().cos(), "CompositeImplicitAutograd") self._test_backward_impl_raises(qualname, "CompositeImplicitAutograd") @parametrize("key", ["Autograd", "AutogradCPU", "AutogradCUDA"]) def test_backward_impl_on_existing_op_with_key(self, key): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" lib.impl("foo", lambda x: x.sin().cos(), key) self._test_backward_impl_raises(qualname, key) def test_is_functional_schema(self): tests = { "foo(Tensor x) -> Tensor": True, "foo(Tensor(a) x) -> Tensor": True, "foo(Tensor(a!) x) -> Tensor": False, "foo(Tensor(a) x) -> Tensor(a)": False, "foo(Tensor x) -> ()": False, } for schema_str, expected in tests.items(): res = torch._library.utils.is_functional_schema(schema_str) self.assertEqual(res, expected) from torchgen.model import FunctionSchema schema = FunctionSchema.parse(schema_str) res = torch._library.utils.is_functional_schema(schema) self.assertEqual(res, expected) schema = torch._C.parse_schema(schema_str) res = torch._library.utils.is_functional_schema(schema) self.assertEqual(res, expected) def test_incorrect_schema_types(self): with torch.library._scoped_library("mylib", "FRAGMENT") as lib: with self.assertRaisesRegex(RuntimeError, "unknown type specifier"): lib.define("foo12(Tensor a) -> asdfasdf") with self.assertRaisesRegex(RuntimeError, "unknown type specifier"): lib.define("foo12(asdf a) -> Tensor") with self.assertRaisesRegex(RuntimeError, "Use `SymInt` or `int`"): lib.define("foo12(int64_t a) -> Tensor") with self.assertRaisesRegex(RuntimeError, "Use `float`"): lib.define("foo12(double a) -> Tensor") def test_is_tensorlist_like_type(self): tensorlists = [ # Tensor[] torch.ops.aten.where.default._schema.returns[0].type, # Tensor?[] torch.ops.aten.index.Tensor._schema.arguments[1].type, # Tensor[]? torch._C.parse_schema("foo(Tensor[]? x) -> ()").arguments[0].type, # Tensor?[]? torch._C.parse_schema("foo(Tensor?[]? 
x) -> ()").arguments[0].type, ] non_tensorlists = [ # Tensor torch.ops.aten.sin.default._schema.arguments[0].type, # IntList torch.ops.aten.sum.dim_IntList._schema.arguments[1].type, ] for a in tensorlists: self.assertTrue(torch._library.utils.is_tensorlist_like_type(a)) for a in non_tensorlists: self.assertFalse(torch._library.utils.is_tensorlist_like_type(a)) def test_backward_impl_on_existing_op(self): lib = self.lib() lib.define("foo(Tensor x) -> Tensor") qualname = f"{self.test_ns}::foo" @custom_ops.impl(qualname) def foo_impl(x): with torch.no_grad(): return x.sin() @custom_ops.impl_save_for_backward(qualname) def foo_save_for_backward(inputs, output): return inputs.x @custom_ops.impl_backward(qualname) def foo_backward(ctx, saved, grad_out): return {"x": grad_out * saved.cos()} op = self.get_op(qualname) x = torch.randn([], requires_grad=True) y = op(x) (gx,) = torch.autograd.grad(y, x) self.assertEqual(gx, x.cos()) @parametrize( "tags", [ subtest(torch.Tag.pointwise, "single"), subtest((torch.Tag.pointwise,), "tuple"), subtest([torch.Tag.pointwise], "list"), ], ) def test_define_with_tags(self, tags): lib = self.lib() tags = (torch.Tag.pointwise,) torch.library.define( f"{self.test_ns}::foo", "(Tensor x) -> Tensor", lib=lib, tags=tags ) actual = self.ns().foo.default.tags self.assertTrue(isinstance(actual, list)) self.assertEqual(actual, list(tags)) def test_builtin_aten_ops_are_pt2_compliant(self): for op in [torch.ops.aten.sin.default, torch.ops.aten.sum.dim_IntList]: self.assertIn(torch.Tag.pt2_compliant_tag, op.tags) def test_builtin_torchscript_ops(self): for op in [torch.ops.aten.sub.complex, torch.ops.aten.mul.complex]: self.assertIn(torch.Tag.pt2_compliant_tag, op.tags) def test_autogen_aten_ops_are_pt2_compliant(self): for op in [torch.ops.aten.fill.Tensor_out]: self.assertIn(torch.Tag.generated, op.tags) self.assertIn(torch.Tag.pt2_compliant_tag, op.tags) def test_resolve_packet(self): x = torch.randn(3) result = torch._C._jit_resolve_packet("aten::sum", x) self.assertEqual(result, "default") result = torch._C._jit_resolve_packet("aten::sum", x, dim=1) self.assertEqual(result, "dim_IntList") with self.assertRaisesRegex(RuntimeError, "failed to match any schema"): result = torch._C._jit_resolve_packet("aten::sum", x, x, x) def test_define_bad_schema(self): lib = self.lib() with self.assertRaisesRegex(ValueError, "expected schema to look like"): torch.library.define(f"{self.test_ns}::foo", "foo(Tensor x) -> Tensor") def test_define_and_impl(self): lib = self.lib() torch.library.define(f"{self.test_ns}::foo", "(Tensor x) -> Tensor", lib=lib) @torch.library.impl(f"{self.test_ns}::foo", "CPU", lib=lib) def f(x): return torch.from_numpy(np.sin(x.numpy())) x = torch.randn(3) y = self.ns().foo(x) assert torch.allclose(y, x.sin()) def test_define_validation(self): with self.assertRaisesRegex(ValueError, "namespace"): torch.library.define("foo", "(Tensor x) -> Tensor") def test_legacy_define(self): lib = self.lib() @torch.library.define(lib, "foo(Tensor x) -> Tensor") def f(x): return torch.from_numpy(np.sin(x.numpy())) x = torch.randn(3) y = self.ns().foo(x) assert torch.allclose(y, x.sin()) def test_impl_function(self): lib = self.lib() torch.library.define(f"{self.test_ns}::foo", "(Tensor x) -> Tensor", lib=lib) def f(x): return torch.from_numpy(np.sin(x.numpy())) torch.library.impl(f"{self.test_ns}::foo", "CPU", f, lib=lib) x = torch.randn(3) y = self.ns().foo(x) assert torch.allclose(y, x.sin()) def test_legacy_impl(self): lib = self.lib() 
torch.library.define(f"{self.test_ns}::foo", "(Tensor x) -> Tensor", lib=lib) @torch.library.impl(lib, "foo", "CPU") def f(x): return torch.from_numpy(np.sin(x.numpy())) x = torch.randn(3) y = self.ns().foo(x) assert torch.allclose(y, x.sin()) def test_defined_in_python(self): self.assertFalse(torch.ops.aten.sin.default._defined_in_python) self.assertFalse(torch.ops.aten.sum.dim_IntList._defined_in_python) lib = self.lib() torch.library.define("{self._test_ns}::foo", "(Tensor x) -> Tensor", lib=lib) ns = self.ns() self.assertTrue(ns.foo.default._defined_in_python) torch.library.define( "{self._test_ns}::bar.overload", "(Tensor x) -> Tensor", lib=lib ) self.assertTrue(ns.bar.overload._defined_in_python) def _test_impl_device(self, name, types, device): lib = self.lib() torch.library.define(f"{self.test_ns}::{name}", "(Tensor x) -> Tensor", lib=lib) @torch.library.impl(f"{self.test_ns}::{name}", types) def f(x): x_np = x.cpu().numpy() y = torch.from_numpy(np.sin(x_np)) return y.to(device=x.device) x = torch.randn(3, device=device) y = getattr(self.ns(), name)(x) assert torch.allclose(y, x.sin()) def test_impl_device_cpu(self): self._test_impl_device("foo1", "default", "cpu") self._test_impl_device("foo2", ["cpu"], "cpu") self._test_impl_device("foo3", ["cpu", "cuda"], "cpu") @unittest.skipIf(not TEST_CUDA, "requires cuda") def test_impl_device_cuda(self): self._test_impl_device("foo4", "default", "cuda") self._test_impl_device("foo5", ["cuda"], "cuda") self._test_impl_device("foo6", ["cpu", "cuda"], "cuda") def test_impl_device_function(self): lib = self.lib() torch.library.define(f"{self.test_ns}::foo", "(Tensor x) -> Tensor", lib=lib) def f(x): x_np = x.cpu().numpy() y = torch.from_numpy(np.sin(x_np)) return y.to(device=x.device) torch.library.impl(f"{self.test_ns}::foo", "default", f, lib=lib) x = torch.randn(3) y = self.ns().foo(x) assert torch.allclose(y, x.sin()) def test_impl_device_invalid(self): with self.assertRaisesRegex(RuntimeError, "Expected one of cpu, cuda"): torch.library.impl("blah::blah", "somethingsomething") def test_override_impl(self): lib = self.lib() op_name = f"{self.test_ns}::foo" torch.library.define(op_name, "(Tensor x) -> Tensor", lib=lib) op = self.ns().foo.default def foo_impl1(x): return x * 1 # Register cpu impl to foo_impl1 lib.impl("foo", foo_impl1, "CPU") self.assertEqual(op(torch.ones(3)), torch.ones(3)) def foo_impl2(x): return torch.cat([x, x]) with self.assertRaisesRegex(RuntimeError, "already a kernel registered"): lib.impl("foo", foo_impl2, "CPU") # Override cpu impl to foo_impl2 lib.impl(op_name, foo_impl2, "CPU", allow_override=True) self.assertEqual(op(torch.ones(3)), torch.ones(6)) def test_override_fake(self): lib = self.lib() op_name = f"{self.test_ns}::foo" torch.library.define(op_name, "(Tensor x) -> Tensor", lib=lib) op = self.ns().foo.default def foo_impl1(x): return x * 1 # Register fake kernel to foo_impl1 torch.library.register_fake(op_name, foo_impl1, lib=lib) with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [3]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) def foo_impl2(x): return torch.cat([x, x]) with self.assertRaisesRegex(RuntimeError, "already has an fake impl"): torch.library.register_fake(op_name, foo_impl2, lib=lib) # Override fake kernel to foo_impl2 torch.library.register_fake(op_name, foo_impl2, lib=lib, allow_override=True) with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [6]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [6]) # 
Use scoped_library to temporarily register Fake kernel to foo_impl1 with torch.library._scoped_library(self.test_ns, "FRAGMENT") as lib2: torch.library.register_fake( op_name, foo_impl1, lib=lib2, allow_override=True ) with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [3]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) # Fake kernel should go back to foo_impl2 with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [6]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [6]) def test_override_meta(self): lib = self.lib() op_name = f"{self.test_ns}::foo" torch.library.define(op_name, "(Tensor x) -> Tensor", lib=lib) op = self.ns().foo.default def foo_impl1(x): return x * 1 # Register Meta kernel to foo_impl1 lib.impl("foo", foo_impl1, "Meta") self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) def foo_impl2(x): return torch.cat([x, x]) with self.assertRaisesRegex(RuntimeError, "already a kernel registered"): lib.impl("foo", foo_impl2, "Meta") # Override Meta kernel to foo_impl2 lib.impl("foo", foo_impl2, "Meta", allow_override=True) self.assertEqual(op(torch.ones(3, device="meta")).shape, [6]) # Use scoped_library to temporarily register Meta kernel to foo_impl1 with torch.library._scoped_library(self.test_ns, "FRAGMENT") as lib2: lib2.impl("foo", foo_impl1, "Meta", allow_override=True) self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) # Meta kernel should go back to foo_impl2 self.assertEqual(op(torch.ones(3, device="meta")).shape, [6]) # Use register_fake to override Meta kernel to foo_impl1 torch.library.register_fake(op_name, foo_impl1, lib=lib, allow_override=True) self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) def test_override_cea(self): lib = self.lib() op_name = f"{self.test_ns}::foo" torch.library.define(op_name, "(Tensor x) -> Tensor", lib=lib) op = self.ns().foo.default def foo_impl1(x): return x * 1 # Register CEA impl to foo_impl1 lib.impl("foo", foo_impl1, "CompositeExplicitAutograd") with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [3]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [3]) def foo_impl2(x): return torch.cat([x, x]) # Override Meta/fake kernel with foo_impl2 torch.library.register_fake(op_name, foo_impl2, lib=lib, allow_override=True) with torch._subclasses.FakeTensorMode(): self.assertEqual(op(torch.ones(3)).shape, [6]) self.assertEqual(op(torch.ones(3, device="meta")).shape, [6]) @scoped_load_inline def test_autograd_function_backed_op(self, load_inline): cpp_source = """ struct CustomOpAutogradFunction : public torch::autograd::Function<CustomOpAutogradFunction> { static constexpr bool is_traceable = true; static torch::Tensor forward( torch::autograd::AutogradContext* ctx, const torch::Tensor& x) { return x; } static torch::autograd::variable_list backward( torch::autograd::AutogradContext *ctx, torch::autograd::variable_list grad_output) { return grad_output; } }; torch::Tensor custom_op_backed_by_autograd_fn(const torch::Tensor& x) { return CustomOpAutogradFunction::apply(x); } TORCH_LIBRARY(test_autograd_function_backed_op, m) { m.def("custom_op_backed_by_autograd_fn", custom_op_backed_by_autograd_fn); } """ module = load_inline( name="test_autograd_function_backed_op", cpp_sources=cpp_source, functions="custom_op_backed_by_autograd_fn", verbose=True, ) x = torch.ones(2, 2, requires_grad=True) temp = x.detach().clone() out = ( 
torch.ops.test_autograd_function_backed_op.custom_op_backed_by_autograd_fn( x ) ) loss = out.sum() loss.backward() self.assertEqual(x.grad, temp) # Using a non-existent DSO is a quick way to trigger an OSError, # which can be used to not break BC. def test_load_library(self): with self.assertRaisesRegex( OSError, "Could not load this library: .*libnoexist.so" ): torch.ops.load_library("libnoexist.so") def op_with_incorrect_schema(testcase, name): lib = testcase.lib() lib.define(f"{name}(Tensor x) -> Tensor") qualname = f"{testcase.test_ns}::{name}" lib.impl(name, lambda x: x[:], "CompositeExplicitAutograd") return testcase.get_op(qualname)
TestCustomOp
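The tests above exercise torch.library.define, torch.library.impl, and torch.library.register_fake. A minimal standalone sketch of that pattern, using a hypothetical namespace "mylib" and the same public qualname-based forms the tests call:

import numpy as np
import torch

torch.library.define("mylib::sin_np", "(Tensor x) -> Tensor")

@torch.library.impl("mylib::sin_np", "default")
def sin_np_impl(x):
    # Backs the op with NumPy, mirroring the CPU kernels in the tests above.
    return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)

@torch.library.register_fake("mylib::sin_np")
def sin_np_fake(x):
    # Shape/dtype-only kernel used under FakeTensorMode.
    return torch.empty_like(x)

x = torch.randn(3)
assert torch.allclose(torch.ops.mylib.sin_np(x), x.sin())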
python
pytorch__pytorch
torch/distributed/checkpoint/default_planner.py
{ "start": 15995, "end": 27460 }
class ____(DefaultLoadPlanner): """ Extension of DefaultLoadPlanner, which rebuilds state_dict from the saved metadata. Useful for loading in state_dict without first initializing a model, such as when converting a DCP checkpoint into a Torch save file. . N.B. `state_dict` must be an empty dictionary when used with this LoadPlanner .. warning:: Because the entire state dict is initialized, It's recommended to only utilize this LoadPlanner on a single rank or process to avoid OOM. """ def __init__(self, keys=None, *args, **kwargs): self.keys = keys super().__init__(*args, **kwargs) def _should_include_key(self, key: str, metadata: Metadata) -> bool: if self.keys is None: return True if key in self.keys: return True unflattened_keys: list[str] = [] planner_data = metadata.planner_data.get(key) for unflattened_key in planner_data: if unflattened_keys: unflattened_keys.append( ".".join([unflattened_keys[-1], str(unflattened_key)]) ) else: unflattened_keys.append(unflattened_key) if any(unflattened_key in self.keys for unflattened_key in unflattened_keys): return True return False def set_up_planner( self, state_dict: STATE_DICT_TYPE, metadata: Optional[Metadata] = None, is_coordinator: bool = False, ) -> None: if state_dict: raise AssertionError("not state_dict") if metadata is None: raise AssertionError("metadata is not None") # rebuild the state dict from the metadata for k, v in metadata.state_dict_metadata.items(): if not self._should_include_key(k, metadata): continue if isinstance(v, TensorStorageMetadata): v = torch.empty(v.size, dtype=v.properties.dtype) # type: ignore[assignment] if metadata.planner_data is not None and k in metadata.planner_data: set_element(state_dict, metadata.planner_data[k], v) else: state_dict[k] = v super().set_up_planner(state_dict, metadata, is_coordinator) def create_default_local_load_plan( state_dict: dict[str, Any], metadata: Metadata, strict: bool = True ) -> LoadPlan: requests = [] """ Create the ``LoadPlan`` used by DefaultLoadPlanner. It produces one read item per value in ``state_dict`` using the metadata in ``metadata``. The default behavior is to match key exactly between state_dict and metadata. It handles resharding by issuing multiple read requests against storage in order to match load requirements. """ for fqn, obj in state_dict.items(): # ignore state_dict keys which do not exist in `state_dict` if strict=False if fqn not in metadata.state_dict_metadata: if strict: raise RuntimeError(f"Missing key in checkpoint state_dict: {fqn}.") else: continue md = metadata.state_dict_metadata[fqn] if ( isinstance(md, TensorStorageMetadata) and getattr(obj, "size", None) is not None and md.size != obj.size() ): raise ValueError( f"Size mismatch between saved {md.size} and current: {obj.size()} for {fqn}", ) # Since DTensor supports submesh, adding extra check to ensure _create_read_items() # gets called only when the current rank is part of the mesh for the corresponding DTensor. if isinstance(obj, DTensor): if obj.device_mesh.get_coordinate() is not None: requests += _create_read_items(fqn, md, obj) else: requests += _create_read_items(fqn, md, obj) return LoadPlan(requests) def create_default_global_load_plan( all_plans: list[LoadPlan], ) -> list[LoadPlan]: """ Create global load plan used by DefaultLoadPlanner. The default load behavior involved no global coordination and this function currently doesn't change the local plans. 
""" return all_plans def create_default_local_save_plan( state_dict: dict[str, Any], is_coordinator: bool ) -> SavePlan: """ Create the ``SavePlan`` used by DefaultSavePlanner. On non-coordinator ranks, this function ignores tensors and non-tensor objects, only producing writes for ShardedTensor objects. On the coordinator rank, produce writes for all values. """ requests = [] for fqn, obj in state_dict.items(): # Since DTensor supports submesh, adding extra check to ensure _create_write_items() # gets called only when the current rank is part of the mesh for the corresponding DTensor. if isinstance(obj, DTensor): if obj.device_mesh.get_coordinate() is not None: requests += _create_write_items(fqn, obj) else: # For the plain tensor and non-tensor values, add the request for all # the ranks. Coordinator will decides whether to deduplicate the # values based on the keys. requests += _create_write_items(fqn, obj) return SavePlan(requests) def create_default_global_save_plan( all_plans: list[SavePlan], rewrite_index_hints: bool = True, ) -> tuple[list[SavePlan], Metadata]: """ Create the global plan and metadata used by DefaultSavePlanner. Metadata is produced by concatenating the metadata of all ``WriteItem`` from the supplied plans. The only global planning change is to update index hints in all ``MetadataIndex`` objects if ``rewrite_index_hints`` is True. """ md: dict[str, STORAGE_TYPES] = {} new_plans = [] for plan in all_plans: new_items = [] for item in plan.items: if item.type != WriteItemType.SHARD: if item.index.fqn in md: raise AssertionError("item.index.fqn not in md") if item.type == WriteItemType.BYTE_IO: md[item.index.fqn] = BytesStorageMetadata() new_items.append(item) else: if item.tensor_data is None: raise AssertionError("item.tensor_data is not None") tensor_md = cast( TensorStorageMetadata, md.setdefault( item.index.fqn, TensorStorageMetadata( properties=item.tensor_data.properties, size=item.tensor_data.size, chunks=[], ), ), ) new_item = item if rewrite_index_hints: new_index = dataclasses.replace( item.index, index=len(tensor_md.chunks) ) new_item = dataclasses.replace(item, index=new_index) new_items.append(new_item) if item.tensor_data.chunk is None: raise AssertionError(f""" Cannot create MD for tensor without bounds. FQN: {item.index.fqn} """) tensor_md.chunks.append(item.tensor_data.chunk) new_plans.append(dataclasses.replace(plan, items=new_items)) return (new_plans, Metadata(md)) def _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata: """Return the ``Metadata`` if DefaultSavePlanner was used to checkpoint ``state_dict``.""" plan = _create_default_metadata_only_plan(state_dict) _, md = create_default_global_save_plan([plan]) return md def _check_box_overlap(box0: ChunkStorageMetadata, box1: ChunkStorageMetadata) -> bool: """Check if two boxes overlap. Tuples are (offset, lengths).""" # For each dim of each shard, check if one shard resides on the other # end of second shard with respect to that dim. As an example for a 2D # shard, we would check if one shard is above or on the left of the # other shard. 
ndims = len(box0.offsets) for i in range(ndims): if box0.offsets[i] >= box1.offsets[i] + box1.sizes[i]: return False if box1.offsets[i] >= box0.offsets[i] + box0.sizes[i]: return False return True def _check_box_bounds( outer_box_size: torch.Size, inner_box: ChunkStorageMetadata ) -> bool: for i in range(len(outer_box_size)): if inner_box.offsets[i] < 0: return False if inner_box.sizes[i] < 0: return False if inner_box.offsets[i] + inner_box.sizes[i] > outer_box_size[i]: return False return True def _validate_global_plan(global_plan: list[SavePlan], metadata: Metadata) -> bool: all_good = True for key, value in metadata.state_dict_metadata.items(): if isinstance(value, BytesStorageMetadata): continue if len(value.size) == 0: continue chunks = value.chunks chunks_volume = 0 for chunk in chunks: # Compute the volume if not _check_box_bounds(value.size, chunk): logger.warning( """ key:%s has out of bounds chunk: tensor-size:%s chunk: %s """, key, value.size, chunk, ) all_good = False chunks_volume += math.prod(chunk.sizes) if len(chunks) > 1: dims = len(value.size) sweep_dim = max(range(dims), default=0, key=lambda d: value.size[d]) sorted_indices = sorted( range(len(chunks)), key=lambda idx: ( chunks[idx].offsets[sweep_dim], *(chunks[idx].offsets[d] for d in range(dims)), ), ) active: list[tuple[int, int]] = [] for idx in sorted_indices: current = chunks[idx] start = current.offsets[sweep_dim] end = start + current.sizes[sweep_dim] cutoff = bisect_right(active, (start, sys.maxsize)) if cutoff: del active[:cutoff] for _, other_idx in active: other = chunks[other_idx] if _check_box_overlap(current, other): logger.warning( "key:%s has overlapping chunks: %s %s", key, current, other, ) all_good = False insort(active, (end, idx)) # Check whether combined chunk cover the whole tensor tensor_volume = math.prod(value.size) if len(global_plan) > 1 and chunks_volume != tensor_volume: logger.warning( """ key:%s invalid fill tensor-volume: %s chunks-volume: %s """, key, tensor_volume, chunks_volume, ) all_good = False return all_good
_EmptyStateDictLoadPlanner
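A minimal sketch of how this planner is typically used to rebuild a full state_dict without first constructing a model, following the same pattern as torch.distributed.checkpoint.format_utils.dcp_to_torch_save; the checkpoint path below is hypothetical:

import torch
from torch.distributed.checkpoint import FileSystemReader
from torch.distributed.checkpoint.default_planner import _EmptyStateDictLoadPlanner
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict

state_dict = {}  # must start empty when using _EmptyStateDictLoadPlanner
_load_state_dict(
    state_dict,
    storage_reader=FileSystemReader("/path/to/dcp_checkpoint"),
    planner=_EmptyStateDictLoadPlanner(),
    no_dist=True,  # single-process load, as the docstring above recommends
)
torch.save(state_dict, "model.pt")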
python
walkccc__LeetCode
solutions/3359. Find Sorted Submatrices With Maximum Element at Most K/3359.py
{ "start": 138, "end": 1464 }
class ____:
  def countSubmatrices(self, grid: list[list[int]], k: int) -> int:
    m = len(grid)
    n = len(grid[0])
    ans = 0
    # dp[i][j] := the number of valid subarrays ending in grid[i][j]
    dp = [[0] * n for _ in range(m)]
    # stacks[j] := the stack of valid
    # (subarray width, row index, number of accumulated submatrices) ending in
    # column j
    stacks: list[list[T]] = [[T(0, -1, 0)] for _ in range(n)]

    for i, row in enumerate(grid):
      for j, num in enumerate(row):
        if num > k:
          stacks[j] = [T(0, i, 0)]
        else:
          dp[i][j] = 1
          if j > 0 and row[j - 1] <= k and row[j - 1] >= row[j]:
            # Extend the valid subarrays to the current number.
            dp[i][j] += dp[i][j - 1]
          width = dp[i][j]
          # Remove subarray widths greater than the current count since they
          # will become invalid.
          while stacks[j] and width < stacks[j][-1].subarrayWidth:
            stacks[j].pop()
          height = i - stacks[j][-1].rowIndex
          newSubmatrices = width * height
          accumulatedSubmatrices = (stacks[j][-1].accumulatedSubmatrices +
                                    newSubmatrices)
          ans += accumulatedSubmatrices
          stacks[j].append(T(width, i, accumulatedSubmatrices))

    return ans
Solution
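The snippet above relies on a record type T defined outside the excerpt; a plausible definition consistent with the field names it uses (subarrayWidth, rowIndex, accumulatedSubmatrices), plus a small check, assuming the masked class is named Solution as the completion target indicates:

from typing import NamedTuple

class T(NamedTuple):
    subarrayWidth: int
    rowIndex: int
    accumulatedSubmatrices: int

# 2x2 grid of ones with k = 1: every one of the 9 submatrices qualifies.
assert Solution().countSubmatrices([[1, 1], [1, 1]], 1) == 9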
python
huggingface__transformers
src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
{ "start": 35685, "end": 37145 }
class ____(ModelOutput): r""" loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the language model. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head of the language model. vision_outputs (`BaseModelOutputWithPooling`): Outputs of the vision encoder. qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): Outputs of the Q-Former (Querying Transformer). language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): Outputs of the language model. """ loss: Optional[tuple[torch.FloatTensor]] = None logits: Optional[tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[tuple[torch.FloatTensor]] = None language_model_outputs: Optional[tuple[torch.FloatTensor]] = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"] else getattr(self, k).to_tuple() for k in self.keys() ) @auto_docstring( custom_intro=""" InstructBlipVideo base Model consisting of language model, qformer and vision encoder. """ )
InstructBlipVideoForConditionalGenerationModelOutput
python
pypa__pip
src/pip/_vendor/distro/distro.py
{ "start": 1826, "end": 21227 }
class ____(TypedDict): id: str version: str version_parts: VersionDict like: str codename: str _UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") _UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") _OS_RELEASE_BASENAME = "os-release" #: Translation table for normalizing the "ID" attribute defined in os-release #: files, for use by the :func:`distro.id` method. #: #: * Key: Value as defined in the os-release file, translated to lower case, #: with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_OS_ID = { "ol": "oracle", # Oracle Linux "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap } #: Translation table for normalizing the "Distributor ID" attribute returned by #: the lsb_release command, for use by the :func:`distro.id` method. #: #: * Key: Value as returned by the lsb_release command, translated to lower #: case, with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_LSB_ID = { "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode } #: Translation table for normalizing the distro ID derived from the file name #: of distro release files, for use by the :func:`distro.id` method. #: #: * Key: Value as derived from the file name of a distro release file, #: translated to lower case, with blanks translated to underscores. #: #: * Value: Normalized value. NORMALIZED_DISTRO_ID = { "redhat": "rhel", # RHEL 6.x, 7.x } # Pattern for content of distro release file (reversed) _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" ) # Pattern for base file name of distro release file _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") # Base file names to be looked up for if _UNIXCONFDIR is not readable. _DISTRO_RELEASE_BASENAMES = [ "SuSE-release", "altlinux-release", "arch-release", "base-release", "centos-release", "fedora-release", "gentoo-release", "mageia-release", "mandrake-release", "mandriva-release", "mandrivalinux-release", "manjaro-release", "oracle-release", "redhat-release", "rocky-release", "sl-release", "slackware-version", ] # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( "debian_version", "lsb-release", "oem-release", _OS_RELEASE_BASENAME, "system-release", "plesk-release", "iredmail-release", "board-release", "ec2_version", ) def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: """ .. deprecated:: 1.6.0 :func:`distro.linux_distribution()` is deprecated. It should only be used as a compatibility shim with Python's :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`, :func:`distro.version` and :func:`distro.name` instead. Return information about the current OS distribution as a tuple ``(id_name, version, codename)`` with items as follows: * ``id_name``: If *full_distribution_name* is false, the result of :func:`distro.id`. Otherwise, the result of :func:`distro.name`. * ``version``: The result of :func:`distro.version`. * ``codename``: The extra item (usually in parentheses) after the os-release version number, or the result of :func:`distro.codename`. 
The interface of this function is compatible with the original :py:func:`platform.linux_distribution` function, supporting a subset of its parameters. The data it returns may not exactly be the same, because it uses more data sources than the original function, and that may lead to different data if the OS distribution is not consistent across multiple data sources it provides (there are indeed such distributions ...). Another reason for differences is the fact that the :func:`distro.id` method normalizes the distro ID string to a reliable machine-readable value for a number of popular OS distributions. """ warnings.warn( "distro.linux_distribution() is deprecated. It should only be used as a " "compatibility shim with Python's platform.linux_distribution(). Please use " "distro.id(), distro.version() and distro.name() instead.", DeprecationWarning, stacklevel=2, ) return _distro.linux_distribution(full_distribution_name) def id() -> str: """ Return the distro ID of the current distribution, as a machine-readable string. For a number of OS distributions, the returned distro ID value is *reliable*, in the sense that it is documented and that it does not change across releases of the distribution. This package maintains the following reliable distro ID values: ============== ========================================= Distro ID Distribution ============== ========================================= "ubuntu" Ubuntu "debian" Debian "rhel" RedHat Enterprise Linux "centos" CentOS "fedora" Fedora "sles" SUSE Linux Enterprise Server "opensuse" openSUSE "amzn" Amazon Linux "arch" Arch Linux "buildroot" Buildroot "cloudlinux" CloudLinux OS "exherbo" Exherbo Linux "gentoo" GenToo Linux "ibm_powerkvm" IBM PowerKVM "kvmibm" KVM for IBM z Systems "linuxmint" Linux Mint "mageia" Mageia "mandriva" Mandriva Linux "parallels" Parallels "pidora" Pidora "raspbian" Raspbian "oracle" Oracle Linux (and Oracle Enterprise Linux) "scientific" Scientific Linux "slackware" Slackware "xenserver" XenServer "openbsd" OpenBSD "netbsd" NetBSD "freebsd" FreeBSD "midnightbsd" MidnightBSD "rocky" Rocky Linux "aix" AIX "guix" Guix System "altlinux" ALT Linux ============== ========================================= If you have a need to get distros for reliable IDs added into this set, or if you find that the :func:`distro.id` function returns a different distro ID for one of the listed distros, please create an issue in the `distro issue tracker`_. **Lookup hierarchy and transformations:** First, the ID is obtained from the following sources, in the specified order. The first available and non-empty value is used: * the value of the "ID" attribute of the os-release file, * the value of the "Distributor ID" attribute returned by the lsb_release command, * the first part of the file name of the distro release file, The so determined ID value then passes the following transformations, before it is returned by this method: * it is translated to lower case, * blanks (which should not be there anyway) are translated to underscores, * a normalization of the ID is performed, based upon `normalization tables`_. The purpose of this normalization is to ensure that the ID is as reliable as possible, even across incompatible changes in the OS distributions. A common reason for an incompatible change is the addition of an os-release file, or the addition of the lsb_release command, with ID values that differ from what was previously determined from the distro release file name. 
""" return _distro.id() def name(pretty: bool = False) -> str: """ Return the name of the current OS distribution, as a human-readable string. If *pretty* is false, the name is returned without version or codename. (e.g. "CentOS Linux") If *pretty* is true, the version and codename are appended. (e.g. "CentOS Linux 7.1.1503 (Core)") **Lookup hierarchy:** The name is obtained from the following sources, in the specified order. The first available and non-empty value is used: * If *pretty* is false: - the value of the "NAME" attribute of the os-release file, - the value of the "Distributor ID" attribute returned by the lsb_release command, - the value of the "<name>" field of the distro release file. * If *pretty* is true: - the value of the "PRETTY_NAME" attribute of the os-release file, - the value of the "Description" attribute returned by the lsb_release command, - the value of the "<name>" field of the distro release file, appended with the value of the pretty version ("<version_id>" and "<codename>" fields) of the distro release file, if available. """ return _distro.name(pretty) def version(pretty: bool = False, best: bool = False) -> str: """ Return the version of the current OS distribution, as a human-readable string. If *pretty* is false, the version is returned without codename (e.g. "7.0"). If *pretty* is true, the codename in parenthesis is appended, if the codename is non-empty (e.g. "7.0 (Maipo)"). Some distributions provide version numbers with different precisions in the different sources of distribution information. Examining the different sources in a fixed priority order does not always yield the most precise version (e.g. for Debian 8.2, or CentOS 7.1). Some other distributions may not provide this kind of information. In these cases, an empty string would be returned. This behavior can be observed with rolling releases distributions (e.g. Arch Linux). The *best* parameter can be used to control the approach for the returned version: If *best* is false, the first non-empty version number in priority order of the examined sources is returned. If *best* is true, the most precise version number out of all examined sources is returned. **Lookup hierarchy:** In all cases, the version number is obtained from the following sources. If *best* is false, this order represents the priority order: * the value of the "VERSION_ID" attribute of the os-release file, * the value of the "Release" attribute returned by the lsb_release command, * the version number parsed from the "<version_id>" field of the first line of the distro release file, * the version number parsed from the "PRETTY_NAME" attribute of the os-release file, if it follows the format of the distro release files. * the version number parsed from the "Description" attribute returned by the lsb_release command, if it follows the format of the distro release files. """ return _distro.version(pretty, best) def version_parts(best: bool = False) -> Tuple[str, str, str]: """ Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: * ``major``: The result of :func:`distro.major_version`. * ``minor``: The result of :func:`distro.minor_version`. * ``build_number``: The result of :func:`distro.build_number`. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.version_parts(best) def major_version(best: bool = False) -> str: """ Return the major version of the current OS distribution, as a string, if provided. 
Otherwise, the empty string is returned. The major version is the first part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.major_version(best) def minor_version(best: bool = False) -> str: """ Return the minor version of the current OS distribution, as a string, if provided. Otherwise, the empty string is returned. The minor version is the second part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.minor_version(best) def build_number(best: bool = False) -> str: """ Return the build number of the current OS distribution, as a string, if provided. Otherwise, the empty string is returned. The build number is the third part of the dot-separated version string. For a description of the *best* parameter, see the :func:`distro.version` method. """ return _distro.build_number(best) def like() -> str: """ Return a space-separated list of distro IDs of distributions that are closely related to the current OS distribution in regards to packaging and programming interfaces, for example distributions the current distribution is a derivative from. **Lookup hierarchy:** This information item is only provided by the os-release file. For details, see the description of the "ID_LIKE" attribute in the `os-release man page <http://www.freedesktop.org/software/systemd/man/os-release.html>`_. """ return _distro.like() def codename() -> str: """ Return the codename for the release of the current OS distribution, as a string. If the distribution does not have a codename, an empty string is returned. Note that the returned codename is not always really a codename. For example, openSUSE returns "x86_64". This function does not handle such cases in any special way and just returns the string it finds, if any. **Lookup hierarchy:** * the codename within the "VERSION" attribute of the os-release file, if provided, * the value of the "Codename" attribute returned by the lsb_release command, * the value of the "<codename>" field of the distro release file. """ return _distro.codename() def info(pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information items about the current OS distribution in a dictionary, as shown in the following example: .. sourcecode:: python { 'id': 'rhel', 'version': '7.0', 'version_parts': { 'major': '7', 'minor': '0', 'build_number': '' }, 'like': 'fedora', 'codename': 'Maipo' } The dictionary structure and keys are always the same, regardless of which information items are available in the underlying data sources. The values for the various keys are as follows: * ``id``: The result of :func:`distro.id`. * ``version``: The result of :func:`distro.version`. * ``version_parts -> major``: The result of :func:`distro.major_version`. * ``version_parts -> minor``: The result of :func:`distro.minor_version`. * ``version_parts -> build_number``: The result of :func:`distro.build_number`. * ``like``: The result of :func:`distro.like`. * ``codename``: The result of :func:`distro.codename`. For a description of the *pretty* and *best* parameters, see the :func:`distro.version` method. """ return _distro.info(pretty, best) def os_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current OS distribution. See `os-release file`_ for details about these information items. 
""" return _distro.os_release_info() def lsb_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current OS distribution. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_info() def distro_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. See `distro release file`_ for details about these information items. """ return _distro.distro_release_info() def uname_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. """ return _distro.uname_info() def os_release_attr(attribute: str) -> str: """ Return a single named information item from the os-release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `os-release file`_ for details about these information items. """ return _distro.os_release_attr(attribute) def lsb_release_attr(attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `lsb_release command output`_ for details about these information items. """ return _distro.lsb_release_attr(attribute) def distro_release_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. See `distro release file`_ for details about these information items. """ return _distro.distro_release_attr(attribute) def uname_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. Parameters: * ``attribute`` (string): Key of the information item. Returns: * (string): Value of the information item, if the item exists. The empty string, if the item does not exist. """ return _distro.uname_attr(attribute) try: from functools import cached_property except ImportError: # Python < 3.8 class cached_property: # type: ignore """A version of @property which caches the value. On access, it calls the underlying function and sets the value in `__dict__` so future accesses will not re-call the property. """ def __init__(self, f: Callable[[Any], Any]) -> None: self._fname = f.__name__ self._f = f def __get__(self, obj: Any, owner: Type[Any]) -> Any: assert obj is not None, f"call {self._fname} on an instance" ret = obj.__dict__[self._fname] = self._f(obj) return ret
InfoDict
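A short usage sketch of the module-level API documented above; pip ships this vendored as pip._vendor.distro, while the standalone package is imported as distro:

import distro  # or: from pip._vendor import distro

print(distro.id())                    # e.g. "ubuntu" (normalized, machine-readable)
print(distro.name(pretty=True))       # e.g. "Ubuntu 22.04.4 LTS"
print(distro.version(best=True))      # most precise version the data sources provide
print(distro.info()["version_parts"]) # {'major': ..., 'minor': ..., 'build_number': ...}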
python
mkdocs__mkdocs
mkdocs/exceptions.py
{ "start": 77, "end": 283 }
class ____(ClickException): """ The base class which all MkDocs exceptions inherit from. This should not be raised directly. One of the subclasses should be raised instead. """
MkDocsException
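As the docstring notes, this base class is not raised directly; a tiny sketch of raising a concrete subclass instead (BuildError is assumed here to be one of the subclasses defined in mkdocs/exceptions.py):

from mkdocs.exceptions import BuildError

def check_output_dir(path: str) -> None:
    if not path:
        # Raise a concrete subclass rather than the MkDocsException base class.
        raise BuildError("No output directory configured.")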
python
automl__auto-sklearn
test/test_pipeline/implementations/test_MinorityCoalescer.py
{ "start": 144, "end": 3147 }
class ____(unittest.TestCase): @property def X1(self): # Generates an array with categories 3, 4, 5, 6, 7 and occurences of 30%, # 30%, 30%, 5% and 5% respectively X = np.vstack( ( np.ones((30, 10)) * 3, np.ones((30, 10)) * 4, np.ones((30, 10)) * 5, np.ones((5, 10)) * 6, np.ones((5, 10)) * 7, ) ) for col in range(X.shape[1]): np.random.shuffle(X[:, col]) return X @property def X2(self): # Generates an array with categories 3, 4, 5, 6, 7 and occurences of 5%, # 5%, 5%, 35% and 50% respectively X = np.vstack( ( np.ones((5, 10)) * 3, np.ones((5, 10)) * 4, np.ones((5, 10)) * 5, np.ones((35, 10)) * 6, np.ones((50, 10)) * 7, ) ) for col in range(X.shape[1]): np.random.shuffle(X[:, col]) return X def test_default(self): X = self.X1 X_copy = np.copy(X) Y = MinorityCoalescer().fit_transform(X) np.testing.assert_array_almost_equal(Y, X_copy) # Assert no copies were made self.assertEqual(id(X), id(Y)) def test_coalesce_10_percent(self): X = self.X1 Y = MinorityCoalescer(minimum_fraction=0.1).fit_transform(X) for col in range(Y.shape[1]): hist = np.histogram(Y[:, col], bins=np.arange(1, 7)) np.testing.assert_array_almost_equal(hist[0], [10, 0, 30, 30, 30]) # Assert no copies were made self.assertEqual(id(X), id(Y)) def test_coalesce_10_percent_sparse(self): X = scipy.sparse.csc_matrix(self.X1) Y = MinorityCoalescer(minimum_fraction=0.1).fit_transform(X) # Assert no copies were made self.assertEqual(id(X), id(Y)) Y = Y.todense() for col in range(Y.shape[1]): hist = np.histogram(Y[:, col], bins=np.arange(1, 7)) np.testing.assert_array_almost_equal(hist[0], [10, 0, 30, 30, 30]) def test_invalid_X(self): X = self.X1 - 2 with self.assertRaises(ValueError): MinorityCoalescer().fit_transform(X) def test_transform_after_fit(self): # On both X_fit and X_transf, the categories 3, 4, 5, 6, 7 are present. X_fit = self.X1 # Here categories 3, 4, 5 have ocurrence above 10% X_transf = self.X2 # Here it is the opposite, just categs 6 and 7 are above 10% mc = MinorityCoalescer(minimum_fraction=0.1).fit(X_fit) # transform() should coalesce categories as learned during fit. # Category distribution in X_transf should be irrelevant. Y = mc.transform(X_transf) for col in range(Y.shape[1]): hist = np.histogram(Y[:, col], bins=np.arange(1, 7)) np.testing.assert_array_almost_equal(hist[0], [85, 0, 5, 5, 5])
MinorityCoalescerTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 183983, "end": 184490 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("pull_request_id", "expected_head_oid", "client_mutation_id") pull_request_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="pullRequestId" ) expected_head_oid = sgqlc.types.Field(GitObjectID, graphql_name="expectedHeadOid") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdatePullRequestBranchInput
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py
{ "start": 327, "end": 638 }
class ____(PackageBase): """Specialized class for packages that are built using Perl.""" build_system_class = "PerlPackage" default_buildsystem = "perl" build_system("perl") extends("perl", when="build_system=perl") def test_use(self): pass @register_builder("perl")
PerlPackage
python
streamlit__streamlit
lib/streamlit/elements/text.py
{ "start": 1127, "end": 4004 }
class ____: @gather_metrics("text") def text( self, body: SupportsStr, *, # keyword-only arguments: help: str | None = None, width: Width = "content", text_alignment: TextAlignment = "left", ) -> DeltaGenerator: r"""Write text without Markdown or HTML parsing. For monospace text, use |st.code|_. .. |st.code| replace:: ``st.code`` .. _st.code: https://docs.streamlit.io/develop/api-reference/text/st.code Parameters ---------- body : str The string to display. help : str or None A tooltip that gets displayed next to the text. If this is ``None`` (default), no tooltip is displayed. The tooltip can optionally contain GitHub-flavored Markdown, including the Markdown directives described in the ``body`` parameter of ``st.markdown``. width : "content", "stretch", or int The width of the text element. This can be one of the following: - ``"content"`` (default): The width of the element matches the width of its content, but doesn't exceed the width of the parent container. - ``"stretch"``: The width of the element matches the width of the parent container. - An integer specifying the width in pixels: The element has a fixed width. If the specified width is greater than the width of the parent container, the width of the element matches the width of the parent container. text_alignment : "left", "center", "right", or "justify" The horizontal alignment of the text within the element. This can be one of the following: - ``"left"`` (default): Text is aligned to the left edge. - ``"center"``: Text is centered. - ``"right"``: Text is aligned to the right edge. - ``"justify"``: Text is justified (stretched to align on both left and right edges, with the last line left-aligned). Example ------- >>> import streamlit as st >>> >>> st.text("This is text\n[and more text](that's not a Markdown link).") .. output :: https://doc-text.streamlit.app/ height: 220px """ text_proto = TextProto() text_proto.body = clean_text(body) if help: text_proto.help = help validate_width(width, allow_content=True) layout_config = LayoutConfig(width=width, text_alignment=text_alignment) return self.dg._enqueue("text", text_proto, layout_config=layout_config) @property def dg(self) -> DeltaGenerator: """Get our DeltaGenerator.""" return cast("DeltaGenerator", self)
TextMixin
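A small usage sketch of the parameters documented above (pixel width and text_alignment), complementing the docstring's own example:

import streamlit as st

st.text(
    "Plain text, rendered without Markdown or HTML parsing.",
    help="Shown as a tooltip next to the text.",
    width=300,               # fixed width in pixels
    text_alignment="right",  # one of "left", "center", "right", "justify"
)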
python
python-poetry__poetry
src/poetry/console/commands/search.py
{ "start": 256, "end": 1616 }
class ____(Command): name = "search" description = "Searches for packages on remote repositories." arguments: ClassVar[list[Argument]] = [ argument("tokens", "The tokens to search for.", multiple=True) ] def handle(self) -> int: seen = set() table = self.table(style="compact") table.set_headers( ["<b>Package</>", "<b>Version</>", "<b>Source</>", "<b>Description</>"] ) rows = [] for repository in self.poetry.pool.repositories: for result in repository.search(self.argument("tokens")): key = f"{repository.name}::{result.pretty_string}" if key in seen: continue seen.add(key) rows.append((result, repository.name)) if not rows: self.line("<info>No matching packages were found.</>") return 0 for package, source in sorted( rows, key=lambda x: (x[0].name, x[0].version, x[1]) ): table.add_row( [ f"<c1>{package.name}</>", f"<b>{package.version}</b>", f"<fg=yellow;options=bold>{source}</>", str(package.description), ] ) table.render() return 0
SearchCommand
python
pytorch__pytorch
test/test_cuda_nvml_based_avail.py
{ "start": 1449, "end": 3715 }
class ____(TestCase): SUBPROCESS_REMINDER_MSG = ( "\n REMINDER: Tests defined in test_cuda_nvml_based_avail.py must be run in a process " "where there CUDA Driver API has not been initialized. Before further debugging, ensure you are either using " "run_test.py or have added --subprocess to run each test in a different subprocess." ) def setUp(self): super().setUp() torch.cuda._cached_device_count = ( None # clear the lru_cache on this method before our test ) @staticmethod def in_bad_fork_test() -> bool: _ = torch.cuda.is_available() return torch.cuda._is_in_bad_fork() # These tests validate the behavior and activation of the weaker, NVML-based, user-requested # `torch.cuda.is_available()` assessment. The NVML-based assessment should be attempted when # `PYTORCH_NVML_BASED_CUDA_CHECK` is set to 1, reverting to the default CUDA Runtime API check otherwise. # If the NVML-based assessment is attempted but fails, the CUDA Runtime API check should be executed @unittest.skipIf(IS_WINDOWS, "Needs fork") @parametrize("nvml_avail", [True, False]) @parametrize("avoid_init", ["1", "0", None]) def test_cuda_is_available(self, avoid_init, nvml_avail): if IS_JETSON and nvml_avail and avoid_init == "1": self.skipTest("Not working for Jetson") patch_env = {"PYTORCH_NVML_BASED_CUDA_CHECK": avoid_init} if avoid_init else {} with patch.dict(os.environ, **patch_env): if nvml_avail: _ = torch.cuda.is_available() else: with patch.object(torch.cuda, "_device_count_nvml", return_value=-1): _ = torch.cuda.is_available() with multiprocessing.get_context("fork").Pool(1) as pool: in_bad_fork = pool.apply(TestExtendedCUDAIsAvail.in_bad_fork_test) if os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1" and nvml_avail: self.assertFalse( in_bad_fork, TestExtendedCUDAIsAvail.SUBPROCESS_REMINDER_MSG ) else: assert in_bad_fork @torch.testing._internal.common_utils.markDynamoStrictTest
TestExtendedCUDAIsAvail
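The behaviour these tests pin down can be requested directly through the environment variable they patch; a minimal sketch, assuming the variable is read when torch.cuda.is_available() is consulted:

import os

# Ask for the weaker NVML-based check so the CUDA driver/runtime is not
# initialized in this process (important before fork()).
os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"

import torch

print(torch.cuda.is_available())     # answered via NVML when the variable is "1"
print(torch.cuda._is_in_bad_fork())  # the private flag the test checks in forked workers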
python
scipy__scipy
scipy/spatial/distance.py
{ "start": 47330, "end": 91845 }
class ____: # Name of python distance function canonical_name: str # All aliases, including canonical_name aka: set[str] # unvectorized distance function dist_func: Callable # Optimized cdist function cdist_func: Callable # Optimized pdist function pdist_func: Callable # function that checks kwargs and computes default values: # f(X, m, n, **kwargs) validator: Callable | None = None # list of supported types: # X (pdist) and XA (cdist) are used to choose the type. if there is no # match the first type is used. Default double types: list[str] = dataclasses.field(default_factory=lambda: ['double']) # true if out array must be C-contiguous requires_contiguous_out: bool = True # Registry of implemented metrics: _METRIC_INFOS = [ MetricInfo( canonical_name='braycurtis', aka={'braycurtis'}, dist_func=braycurtis, cdist_func=_distance_pybind.cdist_braycurtis, pdist_func=_distance_pybind.pdist_braycurtis, ), MetricInfo( canonical_name='canberra', aka={'canberra'}, dist_func=canberra, cdist_func=_distance_pybind.cdist_canberra, pdist_func=_distance_pybind.pdist_canberra, ), MetricInfo( canonical_name='chebyshev', aka={'chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'}, dist_func=chebyshev, cdist_func=_distance_pybind.cdist_chebyshev, pdist_func=_distance_pybind.pdist_chebyshev, ), MetricInfo( canonical_name='cityblock', aka={'cityblock', 'cblock', 'cb', 'c'}, dist_func=cityblock, cdist_func=_distance_pybind.cdist_cityblock, pdist_func=_distance_pybind.pdist_cityblock, ), MetricInfo( canonical_name='correlation', aka={'correlation', 'co'}, dist_func=correlation, cdist_func=CDistMetricWrapper('correlation'), pdist_func=PDistMetricWrapper('correlation'), ), MetricInfo( canonical_name='cosine', aka={'cosine', 'cos'}, dist_func=cosine, cdist_func=CDistMetricWrapper('cosine'), pdist_func=PDistMetricWrapper('cosine'), ), MetricInfo( canonical_name='dice', aka={'dice'}, types=['bool'], dist_func=dice, cdist_func=_distance_pybind.cdist_dice, pdist_func=_distance_pybind.pdist_dice, ), MetricInfo( canonical_name='euclidean', aka={'euclidean', 'euclid', 'eu', 'e'}, dist_func=euclidean, cdist_func=_distance_pybind.cdist_euclidean, pdist_func=_distance_pybind.pdist_euclidean, ), MetricInfo( canonical_name='hamming', aka={'matching', 'hamming', 'hamm', 'ha', 'h'}, types=['double', 'bool'], validator=_validate_hamming_kwargs, dist_func=hamming, cdist_func=_distance_pybind.cdist_hamming, pdist_func=_distance_pybind.pdist_hamming, ), MetricInfo( canonical_name='jaccard', aka={'jaccard', 'jacc', 'ja', 'j'}, types=['double', 'bool'], dist_func=jaccard, cdist_func=_distance_pybind.cdist_jaccard, pdist_func=_distance_pybind.pdist_jaccard, ), MetricInfo( canonical_name='jensenshannon', aka={'jensenshannon', 'js'}, dist_func=jensenshannon, cdist_func=CDistMetricWrapper('jensenshannon'), pdist_func=PDistMetricWrapper('jensenshannon'), ), MetricInfo( canonical_name='mahalanobis', aka={'mahalanobis', 'mahal', 'mah'}, validator=_validate_mahalanobis_kwargs, dist_func=mahalanobis, cdist_func=CDistMetricWrapper('mahalanobis'), pdist_func=PDistMetricWrapper('mahalanobis'), ), MetricInfo( canonical_name='minkowski', aka={'minkowski', 'mi', 'm', 'pnorm'}, validator=_validate_minkowski_kwargs, dist_func=minkowski, cdist_func=_distance_pybind.cdist_minkowski, pdist_func=_distance_pybind.pdist_minkowski, ), MetricInfo( canonical_name='rogerstanimoto', aka={'rogerstanimoto'}, types=['bool'], dist_func=rogerstanimoto, cdist_func=_distance_pybind.cdist_rogerstanimoto, pdist_func=_distance_pybind.pdist_rogerstanimoto, ), MetricInfo( 
canonical_name='russellrao', aka={'russellrao'}, types=['bool'], dist_func=russellrao, cdist_func=_distance_pybind.cdist_russellrao, pdist_func=_distance_pybind.pdist_russellrao, ), MetricInfo( canonical_name='seuclidean', aka={'seuclidean', 'se', 's'}, validator=_validate_seuclidean_kwargs, dist_func=seuclidean, cdist_func=CDistMetricWrapper('seuclidean'), pdist_func=PDistMetricWrapper('seuclidean'), ), MetricInfo( canonical_name='sokalsneath', aka={'sokalsneath'}, types=['bool'], dist_func=sokalsneath, cdist_func=_distance_pybind.cdist_sokalsneath, pdist_func=_distance_pybind.pdist_sokalsneath, ), MetricInfo( canonical_name='sqeuclidean', aka={'sqeuclidean', 'sqe', 'sqeuclid'}, dist_func=sqeuclidean, cdist_func=_distance_pybind.cdist_sqeuclidean, pdist_func=_distance_pybind.pdist_sqeuclidean, ), MetricInfo( canonical_name='yule', aka={'yule'}, types=['bool'], dist_func=yule, cdist_func=_distance_pybind.cdist_yule, pdist_func=_distance_pybind.pdist_yule, ), ] _METRICS = {info.canonical_name: info for info in _METRIC_INFOS} _METRIC_ALIAS = {alias: info for info in _METRIC_INFOS for alias in info.aka} _METRICS_NAMES = list(_METRICS.keys()) _TEST_METRICS = {'test_' + info.canonical_name: info for info in _METRIC_INFOS} def pdist(X, metric='euclidean', *, out=None, **kwargs): """ Pairwise distances between observations in n-dimensional space. See Notes for common calling conventions. Parameters ---------- X : array_like An m by n array of m original observations in an n-dimensional space. metric : str or function, optional The distance metric to use. The distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalsneath', 'sqeuclidean', 'yule'. out : ndarray, optional The output array. If not None, condensed distance matrix Y is stored in this array. **kwargs : dict, optional Extra arguments to `metric`: refer to each metric documentation for a list of all possible arguments. Some possible arguments: p : scalar The p-norm to apply for Minkowski, weighted and unweighted. Default: 2. w : ndarray The weight vector for metrics that support weights (e.g., Minkowski). V : ndarray The variance vector for standardized Euclidean. Default: var(X, axis=0, ddof=1) VI : ndarray The inverse of the covariance matrix for Mahalanobis. Default: inv(cov(X.T)).T Returns ------- Y : ndarray Returns a condensed distance matrix Y. For each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number of original observations. The metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``m * i + j - ((i + 2) * (i + 1)) // 2``. See Also -------- squareform : converts between condensed distance matrices and square distance matrices. Notes ----- See ``squareform`` for information on how to calculate the index of this entry or to convert the condensed distance matrix to a redundant square matrix. The following are common calling conventions. 1. ``Y = pdist(X, 'euclidean')`` Computes the distance between m points using Euclidean distance (2-norm) as the distance metric between the points. The points are arranged as m n-dimensional row vectors in the matrix X. 2. ``Y = pdist(X, 'minkowski', p=2.)`` Computes the distances using the Minkowski distance :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note that this is only a quasi-metric if :math:`0 < p < 1`). 3. 
``Y = pdist(X, 'cityblock')`` Computes the city block or Manhattan distance between the points. 4. ``Y = pdist(X, 'seuclidean', V=None)`` Computes the standardized Euclidean distance. The standardized Euclidean distance between two n-vectors ``u`` and ``v`` is .. math:: \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}} V is the variance vector; V[i] is the variance computed over all the i'th components of the points. If not passed, it is automatically computed. 5. ``Y = pdist(X, 'sqeuclidean')`` Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between the vectors. 6. ``Y = pdist(X, 'cosine')`` Computes the cosine distance between vectors u and v, .. math:: 1 - \\frac{u \\cdot v} {{\\|u\\|}_2 {\\|v\\|}_2} where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and :math:`u \\cdot v` is the dot product of ``u`` and ``v``. 7. ``Y = pdist(X, 'correlation')`` Computes the correlation distance between vectors u and v. This is .. math:: 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2} where :math:`\\bar{v}` is the mean of the elements of vector v, and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`. 8. ``Y = pdist(X, 'hamming')`` Computes the normalized Hamming distance, or the proportion of those vector elements between two n-vectors ``u`` and ``v`` which disagree. To save memory, the matrix ``X`` can be of type boolean. 9. ``Y = pdist(X, 'jaccard')`` Computes the Jaccard distance between the points. Given two vectors, ``u`` and ``v``, the Jaccard distance is the proportion of those elements ``u[i]`` and ``v[i]`` that disagree. 10. ``Y = pdist(X, 'jensenshannon')`` Computes the Jensen-Shannon distance between two probability arrays. Given two probability vectors, :math:`p` and :math:`q`, the Jensen-Shannon distance is .. math:: \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}} where :math:`m` is the pointwise mean of :math:`p` and :math:`q` and :math:`D` is the Kullback-Leibler divergence. 11. ``Y = pdist(X, 'chebyshev')`` Computes the Chebyshev distance between the points. The Chebyshev distance between two n-vectors ``u`` and ``v`` is the maximum norm-1 distance between their respective elements. More precisely, the distance is given by .. math:: d(u,v) = \\max_i {|u_i-v_i|} 12. ``Y = pdist(X, 'canberra')`` Computes the Canberra distance between the points. The Canberra distance between two points ``u`` and ``v`` is .. math:: d(u,v) = \\sum_i \\frac{|u_i-v_i|} {|u_i|+|v_i|} 13. ``Y = pdist(X, 'braycurtis')`` Computes the Bray-Curtis distance between the points. The Bray-Curtis distance between two points ``u`` and ``v`` is .. math:: d(u,v) = \\frac{\\sum_i {|u_i-v_i|}} {\\sum_i {|u_i+v_i|}} 14. ``Y = pdist(X, 'mahalanobis', VI=None)`` Computes the Mahalanobis distance between the points. The Mahalanobis distance between two points ``u`` and ``v`` is :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI`` variable) is the inverse covariance. If ``VI`` is not None, ``VI`` will be used as the inverse covariance matrix. 15. ``Y = pdist(X, 'yule')`` Computes the Yule distance between each pair of boolean vectors. (see yule function documentation) 16. ``Y = pdist(X, 'matching')`` Synonym for 'hamming'. 17. ``Y = pdist(X, 'dice')`` Computes the Dice distance between each pair of boolean vectors. (see dice function documentation) 18. ``Y = pdist(X, 'rogerstanimoto')`` Computes the Rogers-Tanimoto distance between each pair of boolean vectors. (see rogerstanimoto function documentation) 19. 
``Y = pdist(X, 'russellrao')`` Computes the Russell-Rao distance between each pair of boolean vectors. (see russellrao function documentation) 20. ``Y = pdist(X, 'sokalsneath')`` Computes the Sokal-Sneath distance between each pair of boolean vectors. (see sokalsneath function documentation) 21. ``Y = pdist(X, f)`` Computes the distance between all pairs of vectors in X using the user supplied 2-arity function f. For example, Euclidean distance between the vectors could be computed as follows:: dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum())) Note that you should avoid passing a reference to one of the distance functions defined in this library. For example,:: dm = pdist(X, sokalsneath) would calculate the pair-wise distances between the vectors in X using the Python function sokalsneath. This would result in sokalsneath being called :math:`{n \\choose 2}` times, which is inefficient. Instead, the optimized C version is more efficient, and we call it using the following syntax.:: dm = pdist(X, 'sokalsneath') Examples -------- >>> import numpy as np >>> from scipy.spatial.distance import pdist ``x`` is an array of five points in three-dimensional space. >>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]]) ``pdist(x)`` with no additional arguments computes the 10 pairwise Euclidean distances: >>> pdist(x) array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949, 6.40312424, 1. , 5.38516481, 4.58257569, 5.47722558]) The following computes the pairwise Minkowski distances with ``p = 3.5``: >>> pdist(x, metric='minkowski', p=3.5) array([2.04898923, 5.1154929 , 7.02700737, 2.43802731, 4.19042714, 6.03956994, 1. , 4.45128103, 4.10636143, 5.0619695 ]) The pairwise city block or Manhattan distances: >>> pdist(x, metric='cityblock') array([ 3., 11., 10., 4., 8., 9., 1., 9., 7., 8.]) """ # You can also call this as: # Y = pdist(X, 'test_abc') # where 'abc' is the metric being tested. This computes the distance # between all pairs of vectors in X using the distance metric 'abc' but # with a more succinct, verifiable, but less efficient implementation. X = _asarray(X) if X.ndim != 2: raise ValueError('A 2-dimensional array must be passed. 
' f'(Shape was {X.shape}).') n = X.shape[0] return xpx.lazy_apply(_np_pdist, X, out, # lazy_apply doesn't support Array kwargs kwargs.pop('w', None), kwargs.pop('V', None), kwargs.pop('VI', None), # See src/distance_pybind.cpp::pdist shape=((n * (n - 1)) // 2, ), dtype=X.dtype, as_numpy=True, metric=metric, **kwargs) def _np_pdist(X, out, w, V, VI, metric='euclidean', **kwargs): X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True, check_finite=False) m, n = X.shape if w is not None: kwargs["w"] = w if V is not None: kwargs["V"] = V if VI is not None: kwargs["VI"] = VI if callable(metric): mstr = getattr(metric, '__name__', 'UnknownCustomMetric') metric_info = _METRIC_ALIAS.get(mstr, None) if metric_info is not None: X, typ, kwargs = _validate_pdist_input( X, m, n, metric_info, **kwargs) return _pdist_callable(X, metric=metric, out=out, **kwargs) elif isinstance(metric, str): mstr = metric.lower() metric_info = _METRIC_ALIAS.get(mstr, None) if metric_info is not None: pdist_fn = metric_info.pdist_func return pdist_fn(X, out=out, **kwargs) elif mstr.startswith("test_"): metric_info = _TEST_METRICS.get(mstr, None) if metric_info is None: raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}') X, typ, kwargs = _validate_pdist_input( X, m, n, metric_info, **kwargs) return _pdist_callable( X, metric=metric_info.dist_func, out=out, **kwargs) else: raise ValueError(f'Unknown Distance Metric: {mstr}') else: raise TypeError('2nd argument metric must be a string identifier ' 'or a function.') def squareform(X, force="no", checks=True): """ Convert a vector-form distance vector to a square-form distance matrix, and vice-versa. Parameters ---------- X : array_like Either a condensed or redundant distance matrix. force : str, optional As with MATLAB(TM), if force is equal to ``'tovector'`` or ``'tomatrix'``, the input will be treated as a distance matrix or distance vector respectively. checks : bool, optional If set to False, no checks will be made for matrix symmetry nor zero diagonals. This is useful if it is known that ``X - X.T1`` is small and ``diag(X)`` is close to zero. These values are ignored any way so they do not disrupt the squareform transformation. Returns ------- Y : ndarray If a condensed distance matrix is passed, a redundant one is returned, or if a redundant one is passed, a condensed distance matrix is returned. Notes ----- 1. ``v = squareform(X)`` Given a square n-by-n symmetric distance matrix ``X``, ``v = squareform(X)`` returns an ``n * (n-1) / 2`` (i.e. binomial coefficient n choose 2) sized vector `v` where :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]` is the distance between distinct points ``i`` and ``j``. If ``X`` is non-square or asymmetric, an error is raised. 2. ``X = squareform(v)`` Given an ``n * (n-1) / 2`` sized vector ``v`` for some integer ``n >= 1`` encoding distances as described, ``X = squareform(v)`` returns an n-by-n distance matrix ``X``. The ``X[i, j]`` and ``X[j, i]`` values are set to :math:`v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)]` and all diagonal elements are zero. In SciPy 0.19.0, ``squareform`` stopped casting all input types to float64, and started returning arrays of the same dtype as the input. Examples -------- >>> import numpy as np >>> from scipy.spatial.distance import pdist, squareform ``x`` is an array of five points in three-dimensional space. 
>>> x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]]) ``pdist(x)`` computes the Euclidean distances between each pair of points in ``x``. The distances are returned in a one-dimensional array with length ``5*(5 - 1)/2 = 10``. >>> distvec = pdist(x) >>> distvec array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949, 6.40312424, 1. , 5.38516481, 4.58257569, 5.47722558]) ``squareform(distvec)`` returns the 5x5 distance matrix. >>> m = squareform(distvec) >>> m array([[0. , 2.23606798, 6.40312424, 7.34846923, 2.82842712], [2.23606798, 0. , 4.89897949, 6.40312424, 1. ], [6.40312424, 4.89897949, 0. , 5.38516481, 4.58257569], [7.34846923, 6.40312424, 5.38516481, 0. , 5.47722558], [2.82842712, 1. , 4.58257569, 5.47722558, 0. ]]) When given a square distance matrix ``m``, ``squareform(m)`` returns the one-dimensional condensed distance vector associated with the matrix. In this case, we recover ``distvec``. >>> squareform(m) array([2.23606798, 6.40312424, 7.34846923, 2.82842712, 4.89897949, 6.40312424, 1. , 5.38516481, 4.58257569, 5.47722558]) """ X = np.ascontiguousarray(X) s = X.shape if force.lower() == 'tomatrix': if len(s) != 1: raise ValueError("Forcing 'tomatrix' but input X is not a " "distance vector.") elif force.lower() == 'tovector': if len(s) != 2: raise ValueError("Forcing 'tovector' but input X is not a " "distance matrix.") # X = squareform(v) if len(s) == 1: if s[0] == 0: return np.zeros((1, 1), dtype=X.dtype) # Grab the closest value to the square root of the number # of elements times 2 to see if the number of elements # is indeed a binomial coefficient. d = int(np.ceil(np.sqrt(s[0] * 2))) # Check that v is of valid dimensions. if d * (d - 1) != s[0] * 2: raise ValueError('Incompatible vector size. It must be a binomial ' 'coefficient n choose 2 for some integer n >= 2.') # Allocate memory for the distance matrix. M = np.zeros((d, d), dtype=X.dtype) # Since the C code does not support striding using strides. # The dimensions are used instead. X = _copy_array_if_base_present(X) # Fill in the values of the distance matrix. _distance_wrap.to_squareform_from_vector_wrap(M, X) # Return the distance matrix. return M elif len(s) == 2: if s[0] != s[1]: raise ValueError('The matrix argument must be square.') if checks: is_valid_dm(X, throw=True, name='X') # One-side of the dimensions is set here. d = s[0] if d <= 1: return np.array([], dtype=X.dtype) # Create a vector. v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype) # Since the C code does not support striding using strides. # The dimensions are used instead. X = _copy_array_if_base_present(X) # Convert the vector to squareform. _distance_wrap.to_vector_from_squareform_wrap(X, v) return v else: raise ValueError("The first argument must be one or two dimensional " f"array. A {len(s)}-dimensional array is not permitted") def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False): """ Return True if input array satisfies basic distance matrix properties (symmetry and zero diagonal). This function checks whether the input is a 2-dimensional square NumPy array with a zero diagonal and symmetry within a specified tolerance. These are necessary properties for a distance matrix but not sufficient -- in particular, this function does **not** check the triangle inequality, which is required for a true metric distance matrix. 
The triangle inequality states that for any three points ``i``, ``j``, and ``k``: ``D[i,k] <= D[i,j] + D[j,k]`` Parameters ---------- D : array_like The candidate object to test for basic distance matrix properties. tol : float, optional The distance matrix is considered symmetric if the absolute difference between entries ``ij`` and ``ji`` is less than or equal to `tol`. The same tolerance is used to determine whether diagonal entries are effectively zero. throw : bool, optional If True, raises an exception when the input is invalid. name : str, optional The name of the variable to check. This is used in exception messages when `throw` is True to identify the offending variable. warning : bool, optional If True, a warning message is raised instead of throwing an exception. Returns ------- valid : bool True if the input satisfies the symmetry and zero-diagonal conditions. Raises ------ ValueError If `throw` is True and `D` is not a valid distance matrix. UserWarning If `warning` is True and `D` is not a valid distance matrix. Notes ----- This function does not check the triangle inequality, which is required for a complete validation of a metric distance matrix. Only structural properties (symmetry and zero diagonal) are verified. Small numerical deviations from symmetry or exact zero diagonal are tolerated within the `tol` parameter. Examples -------- >>> import numpy as np >>> from scipy.spatial.distance import is_valid_dm This matrix is a valid distance matrix. >>> d = np.array([[0.0, 1.1, 1.2, 1.3], ... [1.1, 0.0, 1.0, 1.4], ... [1.2, 1.0, 0.0, 1.5], ... [1.3, 1.4, 1.5, 0.0]]) >>> is_valid_dm(d) True In the following examples, the input is not a valid distance matrix. Not square: >>> is_valid_dm([[0, 2, 2], [2, 0, 2]]) False Nonzero diagonal element: >>> is_valid_dm([[0, 1, 1], [1, 2, 3], [1, 3, 0]]) False Not symmetric: >>> is_valid_dm([[0, 1, 3], [2, 0, 1], [3, 1, 0]]) False """ D = np.asarray(D, order='c') valid = True try: s = D.shape if len(D.shape) != 2: if name: raise ValueError(f"Distance matrix '{name}' must have shape=2 " "(i.e. be two-dimensional).") else: raise ValueError('Distance matrix must have shape=2 (i.e. ' 'be two-dimensional).') if tol == 0.0: if not (D == D.T).all(): if name: raise ValueError(f"Distance matrix '{name}' must be symmetric.") else: raise ValueError('Distance matrix must be symmetric.') if not (D[range(0, s[0]), range(0, s[0])] == 0).all(): if name: raise ValueError(f"Distance matrix '{name}' diagonal must be zero.") else: raise ValueError('Distance matrix diagonal must be zero.') else: if not (D - D.T <= tol).all(): if name: raise ValueError(f'Distance matrix \'{name}\' must be ' f'symmetric within tolerance {tol:5.5f}.') else: raise ValueError('Distance matrix must be symmetric within ' f'tolerance {tol:5.5f}.') if not (D[range(0, s[0]), range(0, s[0])] <= tol).all(): if name: raise ValueError(f'Distance matrix \'{name}\' diagonal must be ' f'close to zero within tolerance {tol:5.5f}.') else: raise ValueError(('Distance matrix \'{}\' diagonal must be close ' 'to zero within tolerance {:5.5f}.').format(*tol)) except Exception as e: if throw: raise if warning: warnings.warn(str(e), stacklevel=2) valid = False return valid def is_valid_y(y, warning=False, throw=False, name=None): """ Return True if the input array is a valid condensed distance matrix. Condensed distance matrices must be 1-dimensional numpy arrays. Their length must be a binomial coefficient :math:`{n \\choose 2}` for some positive integer n. 
Parameters ---------- y : array_like The condensed distance matrix. warning : bool, optional Invokes a warning if the variable passed is not a valid condensed distance matrix. The warning message explains why the distance matrix is not valid. `name` is used when referencing the offending variable. throw : bool, optional Throws an exception if the variable passed is not a valid condensed distance matrix. name : str, optional Used when referencing the offending variable in the warning or exception message. Returns ------- bool True if the input array is a valid condensed distance matrix, False otherwise. Examples -------- >>> from scipy.spatial.distance import is_valid_y This vector is a valid condensed distance matrix. The length is 6, which corresponds to ``n = 4``, since ``4*(4 - 1)/2`` is 6. >>> v = [1.0, 1.2, 1.0, 0.5, 1.3, 0.9] >>> is_valid_y(v) True An input vector with length, say, 7, is not a valid condensed distance matrix. >>> is_valid_y([1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7]) False """ y = _asarray(y) name_str = f"'{name}' " if name else "" try: if len(y.shape) != 1: raise ValueError(f"Condensed distance matrix {name_str}must " "have shape=1 (i.e. be one-dimensional).") n = y.shape[0] d = int(np.ceil(np.sqrt(n * 2))) if (d * (d - 1) / 2) != n: raise ValueError(f"Length n of condensed distance matrix {name_str}" "must be a binomial coefficient, i.e. " "there must be a k such that (k \\choose 2)=n)!") except Exception as e: if throw: raise if warning: warnings.warn(str(e), stacklevel=2) return False return True def num_obs_dm(d): """ Return the number of original observations that correspond to a square, redundant distance matrix. Parameters ---------- d : array_like The target distance matrix. Returns ------- num_obs_dm : int The number of observations in the redundant distance matrix. Examples -------- Find the number of original observations corresponding to a square redundant distance matrix d. >>> from scipy.spatial.distance import num_obs_dm >>> d = [[0, 100, 200], [100, 0, 150], [200, 150, 0]] >>> num_obs_dm(d) 3 """ d = np.asarray(d, order='c') is_valid_dm(d, tol=np.inf, throw=True, name='d') return d.shape[0] def num_obs_y(Y): """ Return the number of original observations that correspond to a condensed distance matrix. Parameters ---------- Y : array_like Condensed distance matrix. Returns ------- n : int The number of observations in the condensed distance matrix `Y`. Examples -------- Find the number of original observations corresponding to a condensed distance matrix Y. >>> from scipy.spatial.distance import num_obs_y >>> Y = [1, 2, 3.5, 7, 10, 4] >>> num_obs_y(Y) 4 """ Y = _asarray(Y) is_valid_y(Y, throw=True, name='Y') k = Y.shape[0] if k == 0: raise ValueError("The number of observations cannot be determined on " "an empty distance matrix.") d = int(np.ceil(np.sqrt(k * 2))) if (d * (d - 1) / 2) != k: raise ValueError("Invalid condensed distance matrix passed. 
Must be " "some k where k=(n choose 2) for some n >= 2.") return d def _prepare_out_argument(out, dtype, expected_shape): if out is None: return np.empty(expected_shape, dtype=dtype) if out.shape != expected_shape: raise ValueError("Output array has incorrect shape.") if not out.flags.c_contiguous: raise ValueError("Output array must be C-contiguous.") if out.dtype != np.float64: raise ValueError("Output array must be double type.") return out def _pdist_callable(X, *, out, metric, **kwargs): n = X.shape[0] out_size = (n * (n - 1)) // 2 dm = _prepare_out_argument(out, np.float64, (out_size,)) k = 0 for i in range(X.shape[0] - 1): for j in range(i + 1, X.shape[0]): dm[k] = metric(X[i], X[j], **kwargs) k += 1 return dm def _cdist_callable(XA, XB, *, out, metric, **kwargs): mA = XA.shape[0] mB = XB.shape[0] dm = _prepare_out_argument(out, np.float64, (mA, mB)) for i in range(mA): for j in range(mB): dm[i, j] = metric(XA[i], XB[j], **kwargs) return dm def cdist(XA, XB, metric='euclidean', *, out=None, **kwargs): """ Compute distance between each pair of the two collections of inputs. See Notes for common calling conventions. Parameters ---------- XA : array_like An :math:`m_A` by :math:`n` array of :math:`m_A` original observations in an :math:`n`-dimensional space. Inputs are converted to float type. XB : array_like An :math:`m_B` by :math:`n` array of :math:`m_B` original observations in an :math:`n`-dimensional space. Inputs are converted to float type. metric : str or callable, optional The distance metric to use. If a string, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalsneath', 'sqeuclidean', 'yule'. **kwargs : dict, optional Extra arguments to `metric`: refer to each metric documentation for a list of all possible arguments. Some possible arguments: p : scalar The p-norm to apply for Minkowski, weighted and unweighted. Default: 2. w : array_like The weight vector for metrics that support weights (e.g., Minkowski). V : array_like The variance vector for standardized Euclidean. Default: var(vstack([XA, XB]), axis=0, ddof=1) VI : array_like The inverse of the covariance matrix for Mahalanobis. Default: inv(cov(vstack([XA, XB].T))).T out : ndarray The output array If not None, the distance matrix Y is stored in this array. Returns ------- Y : ndarray A :math:`m_A` by :math:`m_B` distance matrix is returned. For each :math:`i` and :math:`j`, the metric ``dist(u=XA[i], v=XB[j])`` is computed and stored in the :math:`ij` th entry. Raises ------ ValueError An exception is thrown if `XA` and `XB` do not have the same number of columns. Notes ----- The following are common calling conventions: 1. ``Y = cdist(XA, XB, 'euclidean')`` Computes the distance between :math:`m` points using Euclidean distance (2-norm) as the distance metric between the points. The points are arranged as :math:`m` :math:`n`-dimensional row vectors in the matrix X. 2. ``Y = cdist(XA, XB, 'minkowski', p=2.)`` Computes the distances using the Minkowski distance :math:`\\|u-v\\|_p` (:math:`p`-norm) where :math:`p > 0` (note that this is only a quasi-metric if :math:`0 < p < 1`). 3. ``Y = cdist(XA, XB, 'cityblock')`` Computes the city block or Manhattan distance between the points. 4. ``Y = cdist(XA, XB, 'seuclidean', V=None)`` Computes the standardized Euclidean distance. 
The standardized Euclidean distance between two n-vectors ``u`` and ``v`` is .. math:: \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}. V is the variance vector; V[i] is the variance computed over all the i'th components of the points. If not passed, it is automatically computed. 5. ``Y = cdist(XA, XB, 'sqeuclidean')`` Computes the squared Euclidean distance :math:`\\|u-v\\|_2^2` between the vectors. 6. ``Y = cdist(XA, XB, 'cosine')`` Computes the cosine distance between vectors u and v, .. math:: 1 - \\frac{u \\cdot v} {{\\|u\\|}_2 {\\|v\\|}_2} where :math:`\\|*\\|_2` is the 2-norm of its argument ``*``, and :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`. 7. ``Y = cdist(XA, XB, 'correlation')`` Computes the correlation distance between vectors u and v. This is .. math:: 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2} where :math:`\\bar{v}` is the mean of the elements of vector v, and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`. 8. ``Y = cdist(XA, XB, 'hamming')`` Computes the normalized Hamming distance, or the proportion of those vector elements between two n-vectors ``u`` and ``v`` which disagree. To save memory, the matrix ``X`` can be of type boolean. 9. ``Y = cdist(XA, XB, 'jaccard')`` Computes the Jaccard distance between the points. Given two vectors, ``u`` and ``v``, the Jaccard distance is the proportion of those elements ``u[i]`` and ``v[i]`` that disagree where at least one of them is non-zero. 10. ``Y = cdist(XA, XB, 'jensenshannon')`` Computes the Jensen-Shannon distance between two probability arrays. Given two probability vectors, :math:`p` and :math:`q`, the Jensen-Shannon distance is .. math:: \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}} where :math:`m` is the pointwise mean of :math:`p` and :math:`q` and :math:`D` is the Kullback-Leibler divergence. 11. ``Y = cdist(XA, XB, 'chebyshev')`` Computes the Chebyshev distance between the points. The Chebyshev distance between two n-vectors ``u`` and ``v`` is the maximum norm-1 distance between their respective elements. More precisely, the distance is given by .. math:: d(u,v) = \\max_i {|u_i-v_i|}. 12. ``Y = cdist(XA, XB, 'canberra')`` Computes the Canberra distance between the points. The Canberra distance between two points ``u`` and ``v`` is .. math:: d(u,v) = \\sum_i \\frac{|u_i-v_i|} {|u_i|+|v_i|}. 13. ``Y = cdist(XA, XB, 'braycurtis')`` Computes the Bray-Curtis distance between the points. The Bray-Curtis distance between two points ``u`` and ``v`` is .. math:: d(u,v) = \\frac{\\sum_i (|u_i-v_i|)} {\\sum_i (|u_i+v_i|)} 14. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)`` Computes the Mahalanobis distance between the points. The Mahalanobis distance between two points ``u`` and ``v`` is :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI`` variable) is the inverse covariance. If ``VI`` is not None, ``VI`` will be used as the inverse covariance matrix. 15. ``Y = cdist(XA, XB, 'yule')`` Computes the Yule distance between the boolean vectors. (see `yule` function documentation) 16. ``Y = cdist(XA, XB, 'matching')`` Synonym for 'hamming'. 17. ``Y = cdist(XA, XB, 'dice')`` Computes the Dice distance between the boolean vectors. (see `dice` function documentation). 18. ``Y = cdist(XA, XB, 'rogerstanimoto')`` Computes the Rogers-Tanimoto distance between the boolean vectors. (see `rogerstanimoto` function documentation) 19. ``Y = cdist(XA, XB, 'russellrao')`` Computes the Russell-Rao distance between the boolean vectors. 
(see `russellrao` function documentation) 20. ``Y = cdist(XA, XB, 'sokalsneath')`` Computes the Sokal-Sneath distance between the vectors. (see `sokalsneath` function documentation) 21. ``Y = cdist(XA, XB, f)`` Computes the distance between all pairs of vectors in X using the user supplied 2-arity function f. For example, Euclidean distance between the vectors could be computed as follows:: dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum())) Note that you should avoid passing a reference to one of the distance functions defined in this library. For example,:: dm = cdist(XA, XB, sokalsneath) would calculate the pair-wise distances between the vectors in X using the Python function `sokalsneath`. This would result in sokalsneath being called :math:`{n \\choose 2}` times, which is inefficient. Instead, the optimized C version is more efficient, and we call it using the following syntax:: dm = cdist(XA, XB, 'sokalsneath') Examples -------- Find the Euclidean distances between four 2-D coordinates: >>> from scipy.spatial import distance >>> import numpy as np >>> coords = [(35.0456, -85.2672), ... (35.1174, -89.9711), ... (35.9728, -83.9422), ... (36.1667, -86.7833)] >>> distance.cdist(coords, coords, 'euclidean') array([[ 0. , 4.7044, 1.6172, 1.8856], [ 4.7044, 0. , 6.0893, 3.3561], [ 1.6172, 6.0893, 0. , 2.8477], [ 1.8856, 3.3561, 2.8477, 0. ]]) Find the Manhattan distance from a 3-D point to the corners of the unit cube: >>> a = np.array([[0, 0, 0], ... [0, 0, 1], ... [0, 1, 0], ... [0, 1, 1], ... [1, 0, 0], ... [1, 0, 1], ... [1, 1, 0], ... [1, 1, 1]]) >>> b = np.array([[ 0.1, 0.2, 0.4]]) >>> distance.cdist(a, b, 'cityblock') array([[ 0.7], [ 0.9], [ 1.3], [ 1.5], [ 1.5], [ 1.7], [ 2.1], [ 2.3]]) """ # You can also call this as: # Y = cdist(XA, XB, 'test_abc') # where 'abc' is the metric being tested. This computes the distance # between all pairs of vectors in XA and XB using the distance metric 'abc' # but with a more succinct, verifiable, but less efficient implementation. XA = np.asarray(XA) XB = np.asarray(XB) s = XA.shape sB = XB.shape if len(s) != 2: raise ValueError('XA must be a 2-dimensional array.') if len(sB) != 2: raise ValueError('XB must be a 2-dimensional array.') if s[1] != sB[1]: raise ValueError('XA and XB must have the same number of columns ' '(i.e. feature dimension.)') mA = s[0] mB = sB[0] n = s[1] if callable(metric): mstr = getattr(metric, '__name__', 'Unknown') metric_info = _METRIC_ALIAS.get(mstr, None) if metric_info is not None: XA, XB, typ, kwargs = _validate_cdist_input( XA, XB, mA, mB, n, metric_info, **kwargs) return _cdist_callable(XA, XB, metric=metric, out=out, **kwargs) elif isinstance(metric, str): mstr = metric.lower() metric_info = _METRIC_ALIAS.get(mstr, None) if metric_info is not None: cdist_fn = metric_info.cdist_func return cdist_fn(XA, XB, out=out, **kwargs) elif mstr.startswith("test_"): metric_info = _TEST_METRICS.get(mstr, None) if metric_info is None: raise ValueError(f'Unknown "Test" Distance Metric: {mstr[5:]}') XA, XB, typ, kwargs = _validate_cdist_input( XA, XB, mA, mB, n, metric_info, **kwargs) return _cdist_callable( XA, XB, metric=metric_info.dist_func, out=out, **kwargs) else: raise ValueError(f'Unknown Distance Metric: {mstr}') else: raise TypeError('2nd argument metric must be a string identifier ' 'or a function.')
MetricInfo
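A short usage sketch tying together the pdist, squareform, and validator behaviour documented in the record above (assumes SciPy is installed; the sample points mirror the docstring example):

import numpy as np
from scipy.spatial.distance import is_valid_dm, num_obs_y, pdist, squareform

x = np.array([[2, 0, 2], [2, 2, 3], [-2, 4, 5], [0, 1, 9], [2, 2, 4]])

# pdist returns the condensed vector of n*(n-1)/2 pairwise distances.
condensed = pdist(x, metric="euclidean")
assert num_obs_y(condensed) == 5

# squareform converts between the condensed vector and the redundant matrix.
square = squareform(condensed)
assert is_valid_dm(square)
assert np.allclose(squareform(square), condensed)

# A callable metric is also accepted, at the cost of a Python-level loop.
manual = pdist(x, lambda u, v: np.sqrt(((u - v) ** 2).sum()))
assert np.allclose(manual, condensed)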
python
encode__django-rest-framework
rest_framework/schemas/views.py
{ "start": 343, "end": 1836 }
class ____(APIView): _ignore_model_permissions = True schema = None # exclude from schema renderer_classes = None schema_generator = None public = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self.renderer_classes is None: if coreapi.is_enabled(): self.renderer_classes = [ renderers.CoreAPIOpenAPIRenderer, renderers.CoreJSONRenderer ] else: self.renderer_classes = [ renderers.OpenAPIRenderer, renderers.JSONOpenAPIRenderer, ] if renderers.BrowsableAPIRenderer in api_settings.DEFAULT_RENDERER_CLASSES: self.renderer_classes += [renderers.BrowsableAPIRenderer] def get(self, request, *args, **kwargs): schema = self.schema_generator.get_schema(request, self.public) if schema is None: raise exceptions.PermissionDenied() return Response(schema) def handle_exception(self, exc): # Schema renderers do not render exceptions, so re-perform content # negotiation with default renderers. self.renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES neg = self.perform_content_negotiation(self.request, force=True) self.request.accepted_renderer, self.request.accepted_media_type = neg return super().handle_exception(exc)
SchemaView
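A minimal sketch of how a view like the one above is normally exposed, assuming a standard Django REST framework project; get_schema_view() is DRF's public helper that builds and configures such a schema view:

from django.urls import path
from rest_framework.schemas import get_schema_view

# get_schema_view() wires a schema generator into a SchemaView subclass;
# a GET against the route below returns the rendered schema.
schema_view = get_schema_view(
    title="Example API",
    description="OpenAPI schema for the example project",
    version="1.0.0",
)

urlpatterns = [
    path("openapi/", schema_view, name="openapi-schema"),
]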
python
pydata__xarray
xarray/groupers.py
{ "start": 11190, "end": 16154 }
class ____(Grouper): """ Grouper object for binning numeric data. Attributes ---------- bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or False, default None Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. If True, raises an error. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {"raise", "drop"}, default: "raise" If bin edges are not unique, raise ValueError or drop non-uniques. """ bins: Bins # The rest are copied from pandas right: bool = True labels: Any = None precision: int = 3 include_lowest: bool = False duplicates: Literal["raise", "drop"] = "raise" def reset(self) -> Self: return type(self)( bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, ) def __post_init__(self) -> None: if array_all(isnull(self.bins)): raise ValueError("All bin edges are NaN.") def _cut(self, data): return pd.cut( np.asarray(data).ravel(), bins=self.bins, right=self.right, labels=self.labels, precision=self.precision, include_lowest=self.include_lowest, duplicates=self.duplicates, retbins=True, ) def _pandas_cut_wrapper(self, data, **kwargs): binned, bins = self._cut(data) if isinstance(self.bins, int): # we are running eagerly, update self.bins with actual edges instead self.bins = bins return binned.codes.reshape(data.shape) def factorize(self, group: T_Group) -> EncodedGroups: if isinstance(group, _DummyGroup): group = DataArray(group.data, dims=group.dims, name=group.name) by_is_chunked = is_chunked_array(group.data) if isinstance(self.bins, int) and by_is_chunked: raise ValueError( f"Bin edges must be provided when grouping by chunked arrays. 
Received {self.bins=!r} instead" ) codes = apply_ufunc( self._pandas_cut_wrapper, group, dask="parallelized", keep_attrs=True, output_dtypes=[np.int64], ) if not by_is_chunked and array_all(codes == -1): raise ValueError( f"None of the data falls within bins with edges {self.bins!r}" ) new_dim_name = f"{group.name}_bins" codes.name = new_dim_name # This seems silly, but it lets us have Pandas handle the complexity # of `labels`, `precision`, and `include_lowest`, even when group is a chunked array # Pandas ignores labels when IntervalIndex is passed if self.labels is None or not isinstance(self.bins, pd.IntervalIndex): dummy, _ = self._cut(np.array([0]).astype(group.dtype)) full_index = dummy.categories else: full_index = pd.Index(self.labels) if not by_is_chunked: uniques = np.sort(pd.unique(codes.data.ravel())) unique_values = full_index[uniques[uniques != -1]] else: unique_values = full_index unique_coord = Variable( dims=new_dim_name, data=unique_values, attrs=group.attrs ) return EncodedGroups( codes=codes, full_index=full_index, unique_coord=unique_coord, coords=coordinates_from_variable(unique_coord), ) @dataclass(repr=False)
BinGrouper
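A small sketch of the grouper above in use, assuming a recent xarray release that exposes the xarray.groupers module; include_lowest=True mirrors the pandas.cut keyword documented in the record:

import numpy as np
import xarray as xr
from xarray.groupers import BinGrouper

da = xr.DataArray(np.arange(10.0), dims="x", coords={"x": np.arange(10.0)})

# Explicit bin edges; the result gains an "x_bins" dimension, matching the
# f"{group.name}_bins" naming used in factorize() above.
grouper = BinGrouper(bins=[0, 3, 6, 9], include_lowest=True)
binned = da.groupby(x=grouper).mean()
print(binned)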
python
apache__airflow
providers/arangodb/tests/unit/arangodb/operators/test_arangodb.py
{ "start": 1429, "end": 2229 }
class ____: @mock.patch("airflow.providers.arangodb.operators.arangodb.ArangoDBHook") def test_insert_documents(self, mock_hook): documents_to_insert = [{"_key": "lola", "first": "Lola", "last": "Martin"}] op = ArangoDBCollectionOperator( task_id="insert_task", collection_name="students", documents_to_insert=documents_to_insert, documents_to_update=None, documents_to_replace=None, documents_to_delete=None, delete_collection=False, ) op.execute(mock.MagicMock()) mock_hook.assert_called_once_with(arangodb_conn_id="arangodb_default") mock_hook.return_value.insert_documents.assert_called_once_with("students", documents_to_insert)
TestArangoDBCollectionOperator
python
celery__celery
celery/backends/database/__init__.py
{ "start": 1506, "end": 8133 }
class ____(BaseBackend): """The database result backend.""" # ResultSet.iterate should sleep this much between each pool, # to not bombard the database with queries. subpolling_interval = 0.5 task_cls = Task taskset_cls = TaskSet def __init__(self, dburi=None, engine_options=None, url=None, **kwargs): # The `url` argument was added later and is used by # the app to set backend by url (celery.app.backends.by_url) super().__init__(expires_type=maybe_timedelta, url=url, **kwargs) conf = self.app.conf if self.extended_result: self.task_cls = TaskExtended self.url = url or dburi or conf.database_url self.engine_options = dict( engine_options or {}, **conf.database_engine_options or {}) self.short_lived_sessions = kwargs.get( 'short_lived_sessions', conf.database_short_lived_sessions) schemas = conf.database_table_schemas or {} tablenames = conf.database_table_names or {} self.task_cls.configure( schema=schemas.get('task'), name=tablenames.get('task')) self.taskset_cls.configure( schema=schemas.get('group'), name=tablenames.get('group')) if not self.url: raise ImproperlyConfigured( 'Missing connection string! Do you have the' ' database_url setting set to a real value?') self.session_manager = SessionManager() create_tables_at_setup = conf.database_create_tables_at_setup if create_tables_at_setup is True: self._create_tables() @property def extended_result(self): return self.app.conf.find_value_for_key('extended', 'result') def _create_tables(self): """Create the task and taskset tables.""" self.ResultSession() def ResultSession(self, session_manager=None): if session_manager is None: session_manager = self.session_manager return session_manager.session_factory( dburi=self.url, short_lived_sessions=self.short_lived_sessions, **self.engine_options) @retry def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs): """Store return value and state of an executed task.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(self.task_cls).filter(self.task_cls.task_id == task_id)) task = task and task[0] if not task: task = self.task_cls(task_id) task.task_id = task_id session.add(task) session.flush() self._update_result(task, result, state, traceback=traceback, request=request) session.commit() def _update_result(self, task, result, state, traceback=None, request=None): meta = self._get_result_meta(result=result, state=state, traceback=traceback, request=request, format_date=False, encode=True) # Exclude the primary key id and task_id columns # as we should not set it None columns = [column.name for column in self.task_cls.__table__.columns if column.name not in {'id', 'task_id'}] # Iterate through the columns name of the table # to set the value from meta. 
# If the value is not present in meta, set None for column in columns: value = meta.get(column) setattr(task, column, value) @retry def _get_task_meta_for(self, task_id): """Get task meta-data for a task by id.""" session = self.ResultSession() with session_cleanup(session): task = list(session.query(self.task_cls).filter(self.task_cls.task_id == task_id)) task = task and task[0] if not task: task = self.task_cls(task_id) task.status = states.PENDING task.result = None data = task.to_dict() if data.get('args', None) is not None: data['args'] = self.decode(data['args']) if data.get('kwargs', None) is not None: data['kwargs'] = self.decode(data['kwargs']) return self.meta_from_decoded(data) @retry def _save_group(self, group_id, result): """Store the result of an executed group.""" session = self.ResultSession() with session_cleanup(session): group = self.taskset_cls(group_id, result) session.add(group) session.flush() session.commit() return result @retry def _restore_group(self, group_id): """Get meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): group = session.query(self.taskset_cls).filter( self.taskset_cls.taskset_id == group_id).first() if group: return group.to_dict() @retry def _delete_group(self, group_id): """Delete meta-data for group by id.""" session = self.ResultSession() with session_cleanup(session): session.query(self.taskset_cls).filter( self.taskset_cls.taskset_id == group_id).delete() session.flush() session.commit() @retry def _forget(self, task_id): """Forget about result.""" session = self.ResultSession() with session_cleanup(session): session.query(self.task_cls).filter(self.task_cls.task_id == task_id).delete() session.commit() def cleanup(self): """Delete expired meta-data.""" session = self.ResultSession() expires = self.expires now = self.app.now() with session_cleanup(session): session.query(self.task_cls).filter( self.task_cls.date_done < (now - expires)).delete() session.query(self.taskset_cls).filter( self.taskset_cls.date_done < (now - expires)).delete() session.commit() def __reduce__(self, args=(), kwargs=None): kwargs = {} if not kwargs else kwargs kwargs.update( {'dburi': self.url, 'expires': self.expires, 'engine_options': self.engine_options}) return super().__reduce__(args, kwargs)
DatabaseBackend
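A configuration sketch for the backend above, assuming Celery and SQLAlchemy are installed; the "db+" URL prefix is what routes result storage to this class, and the two conf keys shown are among the settings read in __init__:

from celery import Celery

app = Celery(
    "tasks",
    broker="memory://",
    backend="db+sqlite:///results.sqlite3",  # "db+" selects the SQLAlchemy result backend
)

# Optional tuning picked up by DatabaseBackend.__init__ above.
app.conf.database_engine_options = {"echo": False}
app.conf.database_table_names = {"task": "myapp_taskmeta", "group": "myapp_groupmeta"}

@app.task
def add(x, y):
    return x + y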
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 14120, "end": 15089 }
class ____(Pix2SkyProjection, Zenithal): r""" Slant orthographic projection - pixel to sky. Corresponds to the ``SIN`` projection in FITS WCS. See `Zenithal` for a definition of the full transformation. The following transformation applies when :math:`\xi` and :math:`\eta` are both zero. .. math:: \theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right) The parameters :math:`\xi` and :math:`\eta` are defined from the reference point :math:`(\phi_c, \theta_c)` as: .. math:: \xi &= \cot \theta_c \sin \phi_c \\ \eta &= - \cot \theta_c \cos \phi_c Parameters ---------- xi : float Obliqueness parameter, ξ. Default is 0.0. eta : float Obliqueness parameter, η. Default is 0.0. """ xi = _ParameterDS(default=0.0, description="Obliqueness parameter") eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
Pix2Sky_SlantOrthographic
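An evaluation sketch for the model above, assuming astropy is installed; projection models map projection-plane coordinates (x, y) in degrees to native spherical coordinates (phi, theta), and the Sky2Pix counterpart inverts the transformation:

from astropy.modeling.projections import (
    Pix2Sky_SlantOrthographic,
    Sky2Pix_SlantOrthographic,
)

pix2sky = Pix2Sky_SlantOrthographic(xi=0.0, eta=0.0)
phi, theta = pix2sky(1.0, 2.0)          # plane coordinates -> native spherical

sky2pix = Sky2Pix_SlantOrthographic(xi=0.0, eta=0.0)
x, y = sky2pix(phi, theta)              # round-trips back to (1.0, 2.0)
print(phi, theta, x, y)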
python
nryoung__algorithms
algorithms/data_structures/stack.py
{ "start": 362, "end": 1059 }
class ____: def __init__(self): self.stack_list = [] def add(self, value): """ Push an element onto the top of the stack. Time Complexity: O(1) """ self.stack_list.append(value) def remove(self): """ Remove and return the element on top of the stack. Time Complexity: O(1) """ return self.stack_list.pop() def is_empty(self): """ Return True if the stack is empty, False otherwise. Time Complexity: O(1) """ return not self.size() def size(self): """ Return the number of elements on the stack. Time Complexity: O(1) """ return len(self.stack_list)
Stack
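A quick sketch of the LIFO behaviour of the class above (the placeholder resolves to the target name Stack):

stack = Stack()
assert stack.is_empty()

for value in (1, 2, 3):
    stack.add(value)

assert stack.size() == 3
assert stack.remove() == 3   # last in, first out
assert stack.remove() == 2
assert not stack.is_empty()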
python
doocs__leetcode
solution/2400-2499/2487.Remove Nodes From Linked List/Solution2.py
{ "start": 151, "end": 517 }
class ____: def removeNodes(self, head: Optional[ListNode]) -> Optional[ListNode]: dummy = ListNode(inf, head) cur = head stk = [dummy] while cur: while stk[-1].val < cur.val: stk.pop() stk[-1].next = cur stk.append(cur) cur = cur.next return dummy.next
Solution
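A self-contained sketch exercising removeNodes() above on [5, 2, 13, 3, 8]; ListNode is LeetCode's singly linked list node, so a minimal stand-in is defined here, and the math/typing imports cover the `inf` and `Optional` names the record assumes are in scope:

from math import inf
from typing import Optional

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

head = build([5, 2, 13, 3, 8])
node = Solution().removeNodes(head)
result = []
while node:
    result.append(node.val)
    node = node.next
print(result)  # [13, 8] -- every node with a greater value to its right is removed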
python
realpython__materials
python-oop/dogbreeds.py
{ "start": 0, "end": 282 }
class ____: species = "Canis familiaris" def __init__(self, name, age): self.name = name self.age = age def __str__(self): return f"{self.name} is {self.age} years old" def speak(self, sound): return f"{self.name} barks: {sound}"
Dog
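The class above in use (the placeholder resolves to the target name Dog):

miles = Dog("Miles", 4)
print(miles)                 # Miles is 4 years old
print(miles.speak("Woof"))   # Miles barks: Woof
print(Dog.species)           # Canis familiaris -- shared class attribute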
python
spack__spack
lib/spack/spack/util/environment.py
{ "start": 13151, "end": 13723 }
class ____(NamePathModifier): def execute(self, env: MutableMapping[str, str]): tty.debug(f"RemoveLastPath: {self.name}-{self.value}", level=3) environment_value = env.get(self.name, "") directories = environment_value.split(self.separator)[::-1] directories = [path_to_os_path(os.path.normpath(x)).pop() for x in directories] val = path_to_os_path(os.path.normpath(self.value)).pop() if val in directories: directories.remove(val) env[self.name] = self.separator.join(directories[::-1])
RemoveLastPath
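A standalone sketch of the idea behind the modifier above: drop the last occurrence of a directory from a PATH-like variable. Spack's helpers (path_to_os_path, tty) are replaced with plain os.path calls, and remove_last_path is a hypothetical function name used only for illustration:

import os

def remove_last_path(env, name, value, separator=os.pathsep):
    # Reverse so that removing the first match drops the *last* occurrence.
    directories = env.get(name, "").split(separator)[::-1]
    directories = [os.path.normpath(d) for d in directories]
    target = os.path.normpath(value)
    if target in directories:
        directories.remove(target)
    env[name] = separator.join(directories[::-1])

env = {"PATH": "/usr/bin:/opt/tool/bin:/usr/bin"}
remove_last_path(env, "PATH", "/usr/bin", separator=":")
print(env["PATH"])  # /usr/bin:/opt/tool/bin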
python
pytorch__pytorch
torch/masked/maskedtensor/_ops_refs.py
{ "start": 2739, "end": 3539 }
class ____(torch.autograd.Function): @staticmethod # pyrefly: ignore [bad-override] def forward(ctx, input): if not is_masked_tensor(input): raise ValueError("MaskedToSparse forward: input must be a MaskedTensor.") # Following the convention from sparse tensors that to_sparse always means that we convert to sparse_coo if input.layout == torch.sparse_coo: return input data = input.get_data() mask = input.get_mask() sparse_mask = mask.to_sparse_coo().coalesce() sparse_data = data.sparse_mask(sparse_mask) return MaskedTensor(sparse_data, sparse_mask) @staticmethod # pyrefly: ignore [bad-override] def backward(ctx, grad_output): return grad_output.to_dense()
_MaskedToSparse
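A standalone sketch of the conversion the forward pass above performs, using plain tensors rather than MaskedTensor: the mask's True positions become the indices of a COO tensor and the data is restricted to them (assumes a PyTorch build with sparse COO support):

import torch

data = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
mask = torch.tensor([[True, False], [False, True]])

sparse_mask = mask.to_sparse_coo().coalesce()   # indices of masked-in entries
sparse_data = data.sparse_mask(sparse_mask)     # values restricted to those indices
print(sparse_data.to_dense())                   # tensor([[1., 0.], [0., 4.]])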
python
pytorch__pytorch
test/distributed/elastic/timer/api_test.py
{ "start": 602, "end": 1564 }
class ____(TimerServer): """ Mock implementation of TimerServer for testing purposes. This mock has the following behavior: 1. reaping worker 1 throws 2. reaping worker 2 succeeds 3. reaping worker 3 fails (caught exception) For each workers 1 - 3 returns 2 expired timers """ def __init__(self, request_queue, max_interval): super().__init__(request_queue, max_interval) def register_timers(self, timer_requests): pass def clear_timers(self, worker_ids): pass def get_expired_timers(self, deadline): return { i: [TimerRequest(i, f"test_{i}_0", 0), TimerRequest(i, f"test_{i}_1", 0)] for i in range(1, 4) } def _reap_worker(self, worker_id): if worker_id == 1: raise RuntimeError("test error") elif worker_id == 2: return True elif worker_id == 3: return False
MockTimerServer
python
mlflow__mlflow
tests/store/artifact/test_azure_blob_artifact_repo.py
{ "start": 783, "end": 20857 }
class ____: def __init__(self, items, next_marker=None): self.items = items self.next_marker = next_marker def __iter__(self): return iter(self.items) @pytest.fixture def mock_client(): # Make sure that our environment variable aren't set to actually access Azure old_access_key = os.environ.get("AZURE_STORAGE_ACCESS_KEY") if old_access_key is not None: del os.environ["AZURE_STORAGE_ACCESS_KEY"] old_conn_string = os.environ.get("AZURE_STORAGE_CONNECTION_STRING") if old_conn_string is not None: del os.environ["AZURE_STORAGE_CONNECTION_STRING"] yield mock.MagicMock(autospec=BlobServiceClient) if old_access_key is not None: os.environ["AZURE_STORAGE_ACCESS_KEY"] = old_access_key if old_conn_string is not None: os.environ["AZURE_STORAGE_CONNECTION_STRING"] = old_conn_string def test_artifact_uri_factory(mock_client, monkeypatch): # We pass in the mock_client here to clear Azure environment variables, but we don't use it; # We do need to set up a fake access key for the code to run though monkeypatch.setenv("AZURE_STORAGE", "") repo = get_artifact_repository(TEST_URI) assert isinstance(repo, AzureBlobArtifactRepository) def test_default_az_cred_if_no_env_vars(mock_client): # We pass in the mock_client here to clear Azure environment variables, but we don't use it with mock.patch("azure.identity.DefaultAzureCredential") as mock_default_azure_credential: AzureBlobArtifactRepository(TEST_URI) assert mock_default_azure_credential.call_count == 1 def test_parse_global_wasbs_uri(): parse = AzureBlobArtifactRepository.parse_wasbs_uri global_api_suffix = "blob.core.windows.net" global_wasb_with_short_path = "wasbs://cont@acct.blob.core.windows.net/path" assert parse(global_wasb_with_short_path) == ("cont", "acct", "path", global_api_suffix) global_wasb_without_path = "wasbs://cont@acct.blob.core.windows.net" assert parse(global_wasb_without_path) == ("cont", "acct", "", global_api_suffix) global_wasb_without_path2 = "wasbs://cont@acct.blob.core.windows.net/" assert parse(global_wasb_without_path2) == ("cont", "acct", "", global_api_suffix) global_wasb_with_multi_path = "wasbs://cont@acct.blob.core.windows.net/a/b" assert parse(global_wasb_with_multi_path) == ("cont", "acct", "a/b", global_api_suffix) with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acct.blob.core.evil.net/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acct/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://acct.blob.core.windows.net/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://@acct.blob.core.windows.net/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acctxblob.core.windows.net/path") with pytest.raises(Exception, match="Not a WASBS URI"): parse("wasb://cont@acct.blob.core.windows.net/path") def test_parse_cn_wasbs_uri(): parse = AzureBlobArtifactRepository.parse_wasbs_uri cn_api_suffix = "blob.core.chinacloudapi.cn" cn_wasb_with_short_path = "wasbs://cont@acct.blob.core.chinacloudapi.cn/path" assert parse(cn_wasb_with_short_path) == ("cont", "acct", "path", cn_api_suffix) cn_wasb_without_path = "wasbs://cont@acct.blob.core.chinacloudapi.cn" assert parse(cn_wasb_without_path) == ("cont", "acct", "", cn_api_suffix) cn_wasb_without_path2 = "wasbs://cont@acct.blob.core.chinacloudapi.cn/" assert parse(cn_wasb_without_path2) == ("cont", "acct", "", cn_api_suffix) cn_wasb_with_multi_path = 
"wasbs://cont@acct.blob.core.chinacloudapi.cn/a/b" assert parse(cn_wasb_with_multi_path) == ("cont", "acct", "a/b", cn_api_suffix) with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acct.blob.core.evil.cn/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acct/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://acct.blob.core.chinacloudapi.cn/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://@acct.blob.core.chinacloudapi.cn/path") with pytest.raises(Exception, match="WASBS URI must be of the form"): parse("wasbs://cont@acctxblob.core.chinacloudapi.cn/path") with pytest.raises(Exception, match="Not a WASBS URI"): parse("wasb://cont@acct.blob.core.chinacloudapi.cn/path") def test_list_artifacts_empty(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) mock_client.get_container_client().walk_blobs.return_value = MockBlobList([]) assert repo.list_artifacts() == [] def test_list_artifacts_single_file(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Evaluate single file blob_props = BlobProperties() blob_props.name = posixpath.join(TEST_ROOT_PATH, "file") mock_client.get_container_client().walk_blobs.return_value = MockBlobList([blob_props]) assert repo.list_artifacts("file") == [] @pytest.mark.parametrize("root_path", ["some/path", "some/path/"]) def test_list_artifacts(mock_client, root_path): repo = AzureBlobArtifactRepository( posixpath.join(TEST_BLOB_CONTAINER_ROOT, root_path), client=mock_client ) # Create some files to return dir_prefix = BlobPrefix() dir_prefix.name = posixpath.join(TEST_ROOT_PATH, "dir") blob_props = BlobProperties() blob_props.size = 42 blob_props.name = posixpath.join(TEST_ROOT_PATH, "file") mock_client.get_container_client().walk_blobs.return_value = MockBlobList( [dir_prefix, blob_props] ) artifacts = repo.list_artifacts() mock_client.get_container_client().walk_blobs.assert_called_with(name_starts_with="some/path/") assert artifacts[0].path == "dir" assert artifacts[0].is_dir is True assert artifacts[0].file_size is None assert artifacts[1].path == "file" assert artifacts[1].is_dir is False assert artifacts[1].file_size == 42 def test_log_artifact(mock_client, tmp_path): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) d = tmp_path.joinpath("data") d.mkdir() f = d.joinpath("test.txt") f.write_text("hello world!") fpath = posixpath.join(str(d), "test.txt") repo.log_artifact(fpath) mock_client.get_container_client.assert_called_with("container") arg1, arg2 = mock_client.get_container_client().upload_blob.call_args[0] assert arg1 == posixpath.join(TEST_ROOT_PATH, "test.txt") # arg2 should be a filebuffer assert arg2.name == fpath def test_log_artifacts(mock_client, tmp_path): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) parentd = tmp_path.joinpath("data") parentd.mkdir() subd = parentd.joinpath("subdir") subd.mkdir() a_txt = parentd.joinpath("a.txt") a_txt.write_text("A") b_txt = subd.joinpath("b.txt") b_txt.write_text("B") c_txt = subd.joinpath("c.txt") c_txt.write_text("C") repo.log_artifacts(parentd) mock_client.get_container_client.assert_called_with("container") call_list = mock_client.get_container_client().upload_blob.call_args_list # Ensure that the order of the calls do not matter for call in call_list: arg1, arg2 = call[0] assert arg1 in [ posixpath.join(TEST_ROOT_PATH, x) for x in ["a.txt", 
"subdir/b.txt", "subdir/c.txt"] ] # arg2 should be a filebuffer if arg1.endswith("/a.txt"): assert arg2.name == str(a_txt) elif arg1.endswith("/b.txt"): assert arg2.name == str(b_txt) elif arg1.endswith("/c.txt"): assert arg2.name == str(c_txt) else: # This should be unreachable assert False def test_download_file_artifact(mock_client, tmp_path): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) mock_client.get_container_client().walk_blobs.return_value = MockBlobList([]) def create_file(buffer): local_path = os.path.basename(buffer.name) f = tmp_path.joinpath(local_path) f.write_text("hello world!") mock_client.get_container_client().download_blob().readinto.side_effect = create_file repo.download_artifacts("test.txt") assert os.path.exists(os.path.join(tmp_path, "test.txt")) mock_client.get_container_client().download_blob.assert_called_with( posixpath.join(TEST_ROOT_PATH, "test.txt") ) def test_download_directory_artifact_succeeds_when_artifact_root_is_not_blob_container_root( mock_client, tmp_path ): assert TEST_URI is not TEST_BLOB_CONTAINER_ROOT repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) file_path_1 = "file_1" file_path_2 = "file_2" blob_props_1 = BlobProperties() blob_props_1.size = 42 blob_props_1.name = posixpath.join(TEST_ROOT_PATH, file_path_1) blob_props_2 = BlobProperties() blob_props_2.size = 42 blob_props_2.name = posixpath.join(TEST_ROOT_PATH, file_path_2) def get_mock_listing(*args, **kwargs): """ Produces a mock listing that only contains content if the specified prefix is the artifact root. This allows us to mock `list_artifacts` during the `_download_artifacts_into` subroutine without recursively listing the same artifacts at every level of the directory traversal. """ if posixpath.abspath(kwargs["name_starts_with"]) == posixpath.abspath(TEST_ROOT_PATH): return MockBlobList([blob_props_1, blob_props_2]) else: return MockBlobList([]) def create_file(buffer): fname = os.path.basename(buffer.name) f = tmp_path.joinpath(fname) f.write_text("hello world!") mock_client.get_container_client().walk_blobs.side_effect = get_mock_listing mock_client.get_container_client().download_blob().readinto.side_effect = create_file # Ensure that the root directory can be downloaded successfully repo.download_artifacts("") # Ensure that the `mkfile` side effect copied all of the download artifacts into `tmpdir` dir_contents = os.listdir(tmp_path) assert file_path_1 in dir_contents assert file_path_2 in dir_contents def test_download_directory_artifact_succeeds_when_artifact_root_is_blob_container_root( mock_client, tmp_path ): repo = AzureBlobArtifactRepository(TEST_BLOB_CONTAINER_ROOT, client=mock_client) subdir_path = "my_directory" dir_prefix = BlobPrefix() dir_prefix.name = subdir_path file_path_1 = "file_1" file_path_2 = "file_2" blob_props_1 = BlobProperties() blob_props_1.size = 42 blob_props_1.name = posixpath.join(subdir_path, file_path_1) blob_props_2 = BlobProperties() blob_props_2.size = 42 blob_props_2.name = posixpath.join(subdir_path, file_path_2) def get_mock_listing(*args, **kwargs): """ Produces a mock listing that only contains content if the specified prefix is the artifact root or a relevant subdirectory. This allows us to mock `list_artifacts` during the `_download_artifacts_into` subroutine without recursively listing the same artifacts at every level of the directory traversal. 
""" if posixpath.abspath(kwargs["name_starts_with"]) == "/": return MockBlobList([dir_prefix]) if posixpath.abspath(kwargs["name_starts_with"]) == posixpath.abspath(subdir_path): return MockBlobList([blob_props_1, blob_props_2]) else: return MockBlobList([]) def create_file(buffer): fname = os.path.basename(buffer.name) f = tmp_path.joinpath(fname) f.write_text("hello world!") mock_client.get_container_client().walk_blobs.side_effect = get_mock_listing mock_client.get_container_client().download_blob().readinto.side_effect = create_file # Ensure that the root directory can be downloaded successfully repo.download_artifacts("") # Ensure that the `mkfile` side effect copied all of the download artifacts into `tmpdir` dir_contents = os.listdir(tmp_path) assert file_path_1 in dir_contents assert file_path_2 in dir_contents def test_download_artifact_throws_value_error_when_listed_blobs_do_not_contain_artifact_root_prefix( mock_client, ): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Create a "bad blob" with a name that is not prefixed by the root path of the artifact store bad_blob_props = BlobProperties() bad_blob_props.size = 42 bad_blob_props.name = "file_path" def get_mock_listing(*args, **kwargs): """ Produces a mock listing that only contains content if the specified prefix is the artifact root. This allows us to mock `list_artifacts` during the `_download_artifacts_into` subroutine without recursively listing the same artifacts at every level of the directory traversal. """ if posixpath.abspath(kwargs["name_starts_with"]) == posixpath.abspath(TEST_ROOT_PATH): # Return a blob that is not prefixed by the root path of the artifact store. This # should result in an exception being raised return MockBlobList([bad_blob_props]) else: return MockBlobList([]) mock_client.get_container_client().walk_blobs.side_effect = get_mock_listing with pytest.raises( MlflowException, match="Azure blob does not begin with the specified artifact path" ): repo.download_artifacts("") def test_create_multipart_upload(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) mock_client.url = "some-url" mock_client.account_name = "some-account" mock_client.credential.account_key = base64.b64encode(b"some-key").decode("utf-8") create = repo.create_multipart_upload("local_file") assert create.upload_id is None assert len(create.credentials) == 1 assert create.credentials[0].url.startswith( "some-url/container/some/path/local_file?comp=block" ) def test_complete_multipart_upload(mock_client, tmp_path): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) parts = [ MultipartUploadPart(1, "", "some-url?comp=block&blockid=YQ%3D%3D%3D%3D"), MultipartUploadPart(2, "", "some-url?comp=block&blockid=Yg%3D%3D%3D%3D"), ] repo.complete_multipart_upload("local_file", "", parts) mock_client.get_blob_client.assert_called_with("container", f"{TEST_ROOT_PATH}/local_file") mock_client.get_blob_client().commit_block_list.assert_called_with(["a", "b"]) def test_trace_data(mock_client, tmp_path): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) with pytest.raises(MlflowException, match=r"Trace data not found for path="): repo.download_trace_data() trace_data_path = tmp_path.joinpath("traces.json") trace_data_path.write_text("invalid data") with ( mock.patch( "mlflow.store.artifact.artifact_repo.try_read_trace_data", side_effect=lambda x: try_read_trace_data(trace_data_path), ), pytest.raises(MlflowTraceDataCorrupted, match=r"Trace data is corrupted for path="), ): 
repo.download_trace_data() mock_trace_data = {"spans": [], "request": {"test": 1}, "response": {"test": 2}} trace_data_path.write_text(json.dumps(mock_trace_data)) with mock.patch( "mlflow.store.artifact.artifact_repo.try_read_trace_data", side_effect=lambda x: try_read_trace_data(trace_data_path), ): assert repo.download_trace_data() == mock_trace_data def test_delete_artifacts_single_file(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return a single file blob_props = BlobProperties() blob_props.name = posixpath.join(TEST_ROOT_PATH, "file") mock_client.get_container_client().list_blobs.return_value = [blob_props] repo.delete_artifacts("file") mock_client.get_container_client().delete_blob.assert_called_with(blob_props.name) def test_delete_artifacts_directory(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return multiple files in a directory blob_props_1 = BlobProperties() blob_props_1.name = posixpath.join(TEST_ROOT_PATH, "dir/file1") blob_props_2 = BlobProperties() blob_props_2.name = posixpath.join(TEST_ROOT_PATH, "dir/file2") mock_client.get_container_client().list_blobs.return_value = [blob_props_1, blob_props_2] repo.delete_artifacts("dir") mock_client.get_container_client().delete_blob.assert_any_call(blob_props_1.name) mock_client.get_container_client().delete_blob.assert_any_call(blob_props_2.name) def test_delete_artifacts_nonexistent_path(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return an empty list mock_client.get_container_client().list_blobs.return_value = [] with pytest.raises(MlflowException, match="No such file or directory"): repo.delete_artifacts("nonexistent_path") def test_delete_artifacts_failure(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return a single file blob_props = BlobProperties() blob_props.name = posixpath.join(TEST_ROOT_PATH, "file") mock_client.get_container_client().list_blobs.return_value = [blob_props] # Mock the delete_blob method to raise an exception mock_client.get_container_client().delete_blob.side_effect = ResourceNotFoundError( "Deletion failed" ) with pytest.raises(MlflowException, match=f"No such file or directory: '{blob_props.name}'"): repo.delete_artifacts("file") def test_delete_artifacts_folder(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return multiple files in a folder blob_props_1 = BlobProperties() blob_props_1.name = posixpath.join(TEST_ROOT_PATH, "folder/file1") blob_props_2 = BlobProperties() blob_props_2.name = posixpath.join(TEST_ROOT_PATH, "folder/file2") mock_client.get_container_client().list_blobs.return_value = [blob_props_1, blob_props_2] repo.delete_artifacts("folder") mock_client.get_container_client().delete_blob.assert_any_call(blob_props_1.name) mock_client.get_container_client().delete_blob.assert_any_call(blob_props_2.name) def test_delete_artifacts_folder_with_nested_folders_and_files(mock_client): repo = AzureBlobArtifactRepository(TEST_URI, client=mock_client) # Mock the list_blobs method to return multiple files in a folder with nested folders and files blob_props_1 = BlobProperties() blob_props_1.name = posixpath.join(TEST_ROOT_PATH, "folder/nested_folder/file1") blob_props_2 = BlobProperties() blob_props_2.name = posixpath.join(TEST_ROOT_PATH, "folder/nested_folder/file2") 
blob_props_3 = BlobProperties() blob_props_3.name = posixpath.join(TEST_ROOT_PATH, "folder/nested_folder/nested_file") mock_client.get_container_client().list_blobs.return_value = [ blob_props_1, blob_props_2, blob_props_3, ] repo.delete_artifacts("folder") mock_client.get_container_client().delete_blob.assert_any_call(blob_props_1.name) mock_client.get_container_client().delete_blob.assert_any_call(blob_props_2.name) mock_client.get_container_client().delete_blob.assert_any_call(blob_props_3.name)
MockBlobList
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 859731, "end": 861702 }
class ____(sgqlc.types.Type, Node): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "cards", "created_at", "database_id", "name", "project", "purpose", "resource_path", "updated_at", "url", ) cards = sgqlc.types.Field( sgqlc.types.non_null(ProjectCardConnection), graphql_name="cards", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "archived_states", sgqlc.types.Arg( sgqlc.types.list_of(ProjectCardArchivedState), graphql_name="archivedStates", default=("ARCHIVED", "NOT_ARCHIVED"), ), ), ) ), ) created_at = sgqlc.types.Field( sgqlc.types.non_null(DateTime), graphql_name="createdAt" ) database_id = sgqlc.types.Field(Int, graphql_name="databaseId") name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") project = sgqlc.types.Field(sgqlc.types.non_null(Project), graphql_name="project") purpose = sgqlc.types.Field(ProjectColumnPurpose, graphql_name="purpose") resource_path = sgqlc.types.Field( sgqlc.types.non_null(URI), graphql_name="resourcePath" ) updated_at = sgqlc.types.Field( sgqlc.types.non_null(DateTime), graphql_name="updatedAt" ) url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
ProjectColumn
python
numpy__numpy
numpy/polynomial/tests/test_legendre.py
{ "start": 11406, "end": 13030 }
class ____: # some random values in [-1, 1) x = np.random.random((3, 5)) * 2 - 1 def test_legvander(self): # check for 1d x x = np.arange(3) v = leg.legvander(x, 3) assert_(v.shape == (3, 4)) for i in range(4): coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) # check for 2d x x = np.array([[1, 2], [3, 4], [5, 6]]) v = leg.legvander(x, 3) assert_(v.shape == (3, 2, 4)) for i in range(4): coef = [0] * i + [1] assert_almost_equal(v[..., i], leg.legval(x, coef)) def test_legvander2d(self): # also tests polyval2d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3)) van = leg.legvander2d(x1, x2, [1, 2]) tgt = leg.legval2d(x1, x2, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = leg.legvander2d([x1], [x2], [1, 2]) assert_(van.shape == (1, 5, 6)) def test_legvander3d(self): # also tests polyval3d for non-square coefficient array x1, x2, x3 = self.x c = np.random.random((2, 3, 4)) van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) tgt = leg.legval3d(x1, x2, x3, c) res = np.dot(van, c.flat) assert_almost_equal(res, tgt) # check shape van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) assert_(van.shape == (1, 5, 24)) def test_legvander_negdeg(self): assert_raises(ValueError, leg.legvander, (1, 2, 3), -1)
TestVander
python
skorch-dev__skorch
skorch/classifier.py
{ "start": 2063, "end": 9695 }
class ____(ClassifierMixin, NeuralNet): __doc__ = get_neural_net_clf_doc(NeuralNet.__doc__) def __init__( self, module, *args, criterion=torch.nn.NLLLoss, train_split=ValidSplit(5, stratified=True), classes=None, **kwargs ): super(NeuralNetClassifier, self).__init__( module, *args, criterion=criterion, train_split=train_split, **kwargs ) self.classes = classes @property def _default_callbacks(self): return [ ('epoch_timer', EpochTimer()), ('train_loss', PassthroughScoring( name='train_loss', on_train=True, )), ('valid_loss', PassthroughScoring( name='valid_loss', )), ('valid_acc', EpochScoring( 'accuracy', name='valid_acc', lower_is_better=False, )), ('print_log', PrintLog()), ] @property def classes_(self): if self.classes is not None: if not len(self.classes): raise AttributeError("{} has no attribute 'classes_'".format( self.__class__.__name__)) return np.asarray(self.classes) try: return self.classes_inferred_ except AttributeError as exc: # It's not easily possible to track exactly what circumstances led # to this, so try to make an educated guess and provide a possible # solution. msg = ( f"{self.__class__.__name__} could not infer the classes from y; " "this error probably occurred because the net was trained without y " "and some function tried to access the '.classes_' attribute; " "a possible solution is to provide the 'classes' argument when " f"initializing {self.__class__.__name__}" ) raise AttributeError(msg) from exc # pylint: disable=signature-differs def check_data(self, X, y): if ( (y is None) and (not is_dataset(X)) and (self.iterator_train is DataLoader) ): msg = ("No y-values are given (y=None). You must either supply a " "Dataset as X or implement your own DataLoader for " "training (and your validation) and supply it using the " "``iterator_train`` and ``iterator_valid`` parameters " "respectively.") raise ValueError(msg) if (y is None) and is_dataset(X): try: _, y_ds = data_from_dataset(X) self.classes_inferred_ = np.unique(to_numpy(y_ds)) except AttributeError: # If this fails, we might still be good to go, so don't raise pass if y is not None: # pylint: disable=attribute-defined-outside-init self.classes_inferred_ = np.unique(to_numpy(y)) # pylint: disable=arguments-differ def get_loss(self, y_pred, y_true, *args, **kwargs): # we can assume that the attribute criterion_ exists; if users define # custom criteria, they have to override get_loss anyway if isinstance(self.criterion_, torch.nn.NLLLoss): eps = torch.finfo(y_pred.dtype).eps y_pred = torch.log(y_pred + eps) return super().get_loss(y_pred, y_true, *args, **kwargs) # pylint: disable=signature-differs def fit(self, X, y, **fit_params): """See ``NeuralNet.fit``. In contrast to ``NeuralNet.fit``, ``y`` is non-optional to avoid mistakenly forgetting about ``y``. However, ``y`` can be set to ``None`` in case it is derived dynamically from ``X``. """ # pylint: disable=useless-super-delegation # this is actually a pylint bug: # https://github.com/PyCQA/pylint/issues/1085 return super(NeuralNetClassifier, self).fit(X, y, **fit_params) def predict_proba(self, X): """Where applicable, return probability estimates for samples. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. 
Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_proba : numpy ndarray """ # Only the docstring changed from parent. # pylint: disable=useless-super-delegation return super().predict_proba(X) def predict(self, X): """Where applicable, return class labels for samples in X. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_pred : numpy ndarray """ return self.predict_proba(X).argmax(axis=1) neural_net_binary_clf_doc_start = """NeuralNet for binary classification tasks Use this specifically if you have a binary classification task, with input data X and target y. y must be 1d. """ neural_net_binary_clf_criterion_text = """ criterion : torch criterion (class, default=torch.nn.BCEWithLogitsLoss) Binary cross entropy loss with logits. Note that the module should return the logit of probabilities with shape (batch_size, ). threshold : float (default=0.5) Probabilities above this threshold is classified as 1. ``threshold`` is used by ``predict`` and ``predict_proba`` for classification.""" def get_neural_net_binary_clf_doc(doc): # dedent/indent roundtrip required for consistent indention in both # Python <3.13 and Python >=3.13 # Because <3.13 => no automatic dedent, but it is the case in >=3.13 indentation = " " doc = textwrap.indent(textwrap.dedent(doc.split("\n", 5)[-1]), indentation) doc = neural_net_binary_clf_doc_start + " " + doc pattern = re.compile(r'(\n\s+)(criterion .*\n)(\s.+|.){1,99}') start, end = pattern.search(doc).span() doc = doc[:start] + neural_net_binary_clf_criterion_text + doc[end:] return doc
NeuralNetClassifier
python
great-expectations__great_expectations
great_expectations/profile/base.py
{ "start": 2707, "end": 3447 }
class ____(Enum): """Useful cardinality categories for building profilers.""" NONE = "none" ONE = "one" TWO = "two" FEW = "few" VERY_FEW = "very few" MANY = "many" VERY_MANY = "very many" UNIQUE = "unique" profiler_data_types_with_mapping = { "INT": list(ProfilerTypeMapping.INT_TYPE_NAMES), "FLOAT": list(ProfilerTypeMapping.FLOAT_TYPE_NAMES), "NUMERIC": ( list(ProfilerTypeMapping.INT_TYPE_NAMES) + list(ProfilerTypeMapping.FLOAT_TYPE_NAMES) ), "STRING": list(ProfilerTypeMapping.STRING_TYPE_NAMES), "BOOLEAN": list(ProfilerTypeMapping.BOOLEAN_TYPE_NAMES), "DATETIME": list(ProfilerTypeMapping.DATETIME_TYPE_NAMES), "UNKNOWN": ["unknown"], }
ProfilerCardinality
python
matplotlib__matplotlib
lib/matplotlib/axes/_base.py
{ "start": 3578, "end": 7420 }
class ____: """ Axes locator for `.Axes.inset_axes` and similarly positioned Axes. The locator is a callable object used in `.Axes.set_aspect` to compute the Axes location depending on the renderer. """ def __init__(self, bounds, transform): """ *bounds* (a ``[l, b, w, h]`` rectangle) and *transform* together specify the position of the inset Axes. """ self._bounds = bounds self._transform = transform def __call__(self, ax, renderer): # Subtracting transSubfigure will typically rely on inverted(), # freezing the transform; thus, this needs to be delayed until draw # time as transSubfigure may otherwise change after this is evaluated. return mtransforms.TransformedBbox( mtransforms.Bbox.from_bounds(*self._bounds), self._transform - ax.get_figure(root=False).transSubfigure) def _process_plot_format(fmt, *, ambiguous_fmt_datakey=False): """ Convert a MATLAB style color/line style format string to a (*linestyle*, *marker*, *color*) tuple. Example format strings include: * 'ko': black circles * '.b': blue dots * 'r--': red dashed lines * 'C2--': the third color in the color cycle, dashed lines The format is absolute in the sense that if a linestyle or marker is not defined in *fmt*, there is no line or marker. This is expressed by returning 'None' for the respective quantity. See Also -------- matplotlib.Line2D.lineStyles, matplotlib.colors.cnames All possible styles and color format strings. """ linestyle = None marker = None color = None # First check whether fmt is just a colorspec, but specifically exclude the # grayscale string "1" (not "1.0"), which is interpreted as the tri_down # marker "1". The grayscale string "0" could be unambiguously understood # as a color (black) but also excluded for consistency. if fmt not in ["0", "1"]: try: color = mcolors.to_rgba(fmt) return linestyle, marker, color except ValueError: pass errfmt = ("{!r} is neither a data key nor a valid format string ({})" if ambiguous_fmt_datakey else "{!r} is not a valid format string ({})") i = 0 while i < len(fmt): c = fmt[i] if fmt[i:i+2] in mlines.lineStyles: # First, the two-char styles. if linestyle is not None: raise ValueError(errfmt.format(fmt, "two linestyle symbols")) linestyle = fmt[i:i+2] i += 2 elif c in mlines.lineStyles: if linestyle is not None: raise ValueError(errfmt.format(fmt, "two linestyle symbols")) linestyle = c i += 1 elif c in mlines.lineMarkers: if marker is not None: raise ValueError(errfmt.format(fmt, "two marker symbols")) marker = c i += 1 elif c in mcolors.get_named_colors_mapping(): if color is not None: raise ValueError(errfmt.format(fmt, "two color symbols")) color = c i += 1 elif c == "C": cn_color = re.match(r"C\d+", fmt[i:]) if not cn_color: raise ValueError(errfmt.format(fmt, "'C' must be followed by a number")) color = mcolors.to_rgba(cn_color[0]) i += len(cn_color[0]) else: raise ValueError(errfmt.format(fmt, f"unrecognized character {c!r}")) if linestyle is None and marker is None: linestyle = mpl.rcParams['lines.linestyle'] if linestyle is None: linestyle = 'None' if marker is None: marker = 'None' return linestyle, marker, color
_TransformedBoundsLocator
python
tensorflow__tensorflow
tensorflow/python/client/session_benchmark.py
{ "start": 1164, "end": 8878 }
class ____(test.Benchmark): """Tests and benchmarks for interacting with the `tf.compat.v1.Session`.""" def _benchmarkFeed(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of feeding a tensor. Reports the median cost of feeding a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be feed. iters: The number of iterations to perform. """ feed_val = np.random.rand(size).astype(np.float32) times = [] with ops.Graph().as_default(): p = array_ops.placeholder(dtypes.float32, shape=[size]) # Fetch the operation rather than the tensor, to avoid measuring the time # to fetch back the value. no_op = array_ops.identity(p).op with session.Session(target) as sess: sess.run(no_op, feed_dict={p: feed_val}) # Warm-up run. for _ in range(iters): start_time = time.time() sess.run(no_op, feed_dict={p: feed_val}) end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkFetch(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the tensor to be fetched as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([size])) with session.Session(target) as sess: sess.run(v.initializer) sess.run(v) # Warm-up run. for _ in range(iters): start_time = time.time() sess.run(v) end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkFetchPrebuilt(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the tensor to be fetched as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([size])) with session.Session(target) as sess: sess.run(v.initializer) runner = sess.make_callable(v) runner() # Warm-up run. for _ in range(iters): start_time = time.time() runner() end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkRunOp(self, name, target, iters): """Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the op to be run as a variable, to avoid # constant-folding. 
v = variables.Variable(random_ops.random_normal([])) with session.Session(target) as sess: sess.run(v.initializer) sess.run(v.op) # Warm-up run. for _ in range(iters): start_time = time.time() sess.run(v.op) end_time = time.time() times.append(end_time - start_time) print("%s %f" % (name, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkRunOpPrebuilt(self, name, target, iters): """Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the op to be run as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([])) with session.Session(target) as sess: sess.run(v.initializer) runner = sess.make_callable(v.op) runner() # Warm-up run. for _ in range(iters): start_time = time.time() runner() end_time = time.time() times.append(end_time - start_time) print("%s %f" % (name, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def benchmarkGrpcSession(self): server = server_lib.Server.create_local_server() self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1, 30000) session.Session.reset(server.target) self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target, 1 << 20, 25000) session.Session.reset(server.target) self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1, 40000) session.Session.reset(server.target) self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target, 1 << 20, 20000) session.Session.reset(server.target) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4B", server.target, 1, 50000) session.Session.reset(server.target) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4MB", server.target, 1 << 20, 50000) session.Session.reset(server.target) self._benchmarkRunOp("benchmark_session_runop_grpc", server.target, 50000) session.Session.reset(server.target) self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_grpc", server.target, 100000) session.Session.reset(server.target) def benchmarkDirectSession(self): self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 80000) self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 20000) self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 100000) self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20, 20000) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4B", "", 1, 200000) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4MB", "", 1 << 20, 200000) self._benchmarkRunOp("benchmark_session_runop_direct", "", 200000) self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_direct", "", 200000) if __name__ == "__main__": test.main()
SessionBenchmark
python
faif__python-patterns
patterns/structural/bridge.py
{ "start": 552, "end": 1404 }
class ____: def __init__( self, x: int, y: int, radius: int, drawing_api: Union[DrawingAPI2, DrawingAPI1] ) -> None: self._x = x self._y = y self._radius = radius self._drawing_api = drawing_api # low-level i.e. Implementation specific def draw(self) -> None: self._drawing_api.draw_circle(self._x, self._y, self._radius) # high-level i.e. Abstraction specific def scale(self, pct: float) -> None: self._radius *= pct def main(): """ >>> shapes = (CircleShape(1, 2, 3, DrawingAPI1()), CircleShape(5, 7, 11, DrawingAPI2())) >>> for shape in shapes: ... shape.scale(2.5) ... shape.draw() API1.circle at 1:2 radius 7.5 API2.circle at 5:7 radius 27.5 """ if __name__ == "__main__": import doctest doctest.testmod()
CircleShape
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py
{ "start": 8522, "end": 8782 }
class ____(graphene.Enum): MATERIALIZATION = "MATERIALIZATION" FAILED_TO_MATERIALIZE = "FAILED_TO_MATERIALIZE" OBSERVATION = "OBSERVATION" class Meta: name = "AssetEventHistoryEventTypeSelector"
GrapheneAssetEventHistoryEventTypeSelector
python
ansible__ansible
test/units/plugins/connection/test_ssh.py
{ "start": 20988, "end": 27065 }
class ____(object): def test_retry_then_success(self, monkeypatch): self.conn.set_option('host_key_checking', False) self.conn.set_option('reconnection_retries', 3) monkeypatch.setattr('time.sleep', lambda x: None) self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"] self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"] type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4) self.mock_selector.select.side_effect = [ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [] ] self.mock_selector.get_map.side_effect = lambda: True self.conn._build_command = MagicMock() self.conn._build_command.return_value = 'ssh' return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data') assert return_code == 0 assert b_stdout == b'my_stdout\nsecond_line' assert b_stderr == b'my_stderr' def test_multiple_failures(self, monkeypatch): self.conn.set_option('host_key_checking', False) self.conn.set_option('reconnection_retries', 9) monkeypatch.setattr('time.sleep', lambda x: None) self.mock_popen_res.stdout.read.side_effect = [b""] * 10 self.mock_popen_res.stderr.read.side_effect = [b""] * 10 type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30) self.mock_selector.select.side_effect = [ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [], ] * 10 self.mock_selector.get_map.side_effect = lambda: True self.conn._build_command = MagicMock() self.conn._build_command.return_value = 'ssh' pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data') assert self.mock_popen.call_count == 10 def test_abitrary_exceptions(self, monkeypatch): self.conn.set_option('host_key_checking', False) self.conn.set_option('reconnection_retries', 9) monkeypatch.setattr('time.sleep', lambda x: None) self.conn._build_command = MagicMock() self.conn._build_command.return_value = 'ssh' self.mock_popen.side_effect = [Exception('bad')] * 10 pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data') assert self.mock_popen.call_count == 10 def test_put_file_retries(self, monkeypatch): self.conn.set_option('host_key_checking', False) self.conn.set_option('reconnection_retries', 3) monkeypatch.setattr('time.sleep', lambda x: None) monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True) self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"] self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"] type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4) self.mock_selector.select.side_effect = [ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [] ] 
self.mock_selector.get_map.side_effect = lambda: True self.conn._build_command = MagicMock() self.conn._build_command.return_value = 'sftp' return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file') assert return_code == 0 assert b_stdout == b"my_stdout\nsecond_line" assert b_stderr == b"my_stderr" assert self.mock_popen.call_count == 2 def test_fetch_file_retries(self, monkeypatch): self.conn.set_option('host_key_checking', False) self.conn.set_option('reconnection_retries', 3) monkeypatch.setattr('time.sleep', lambda x: None) self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"] self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"] type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4) self.mock_selector.select.side_effect = [ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)], [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)], [] ] self.mock_selector.get_map.side_effect = lambda: True self.conn._build_command = MagicMock() self.conn._build_command.return_value = 'sftp' return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file') assert return_code == 0 assert b_stdout == b"my_stdout\nsecond_line" assert b_stderr == b"my_stderr" assert self.mock_popen.call_count == 2
TestSSHConnectionRetries
python
facebook__pyre-check
client/json_rpc.py
{ "start": 3537, "end": 3735 }
class ____: values: Mapping[str, object] = dataclasses.field(default_factory=dict) Parameters = Union[ByPositionParameters, ByNameParameters] @dataclasses.dataclass(frozen=True)
ByNameParameters
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_training.py
{ "start": 60748, "end": 64740 }
class ____(FSDPTest): @property def world_size(self) -> int: return min(torch.get_device_module(device_type).device_count(), 2) @skip_if_lt_x_gpu(2) def test_share_comm_context(self): torch.manual_seed(42) n_layers = 3 lin_dim = 16 model = nn.Sequential( *[MLP(lin_dim, torch.device("cpu")) for _ in range(n_layers)] ) ref_model = copy.deepcopy(model).to(device_type) for layer in model: fully_shard(layer) layer._get_fsdp_state()._lazy_init() share_comm_ctx(list(model)) torch.manual_seed(42 + self.rank + 1) inp = torch.randn(4, 3, lin_dim, device=device_type.type) ref_loss = ref_model(inp).sum() all_gather_streams = set() reduce_scatter_streams = set() from torch.distributed.fsdp._fully_shard._fsdp_api import ( AllGather, ReduceScatter, ) from torch.distributed.fsdp._fully_shard._fsdp_param import FSDPParam orig_foreach_all_gather = foreach_all_gather def foreach_all_gather_with_assert( fsdp_params: list[FSDPParam], group: dist.ProcessGroup, async_op: bool, all_gather_copy_in_stream: torch.Stream, all_gather_stream: torch.Stream, device: torch.device, all_gather_comm: AllGather, ): nonlocal all_gather_streams all_gather_streams.add(all_gather_stream) return orig_foreach_all_gather( fsdp_params, group, async_op, all_gather_copy_in_stream, all_gather_stream, device, all_gather_comm, ) orig_foreach_reduce = foreach_reduce @torch.no_grad() def foreach_reduce_with_assert( fsdp_params: list[FSDPParam], unsharded_grads: list[torch.Tensor], reduce_scatter_group: dist.ProcessGroup, reduce_scatter_stream: torch.Stream, reduce_scatter_comm: ReduceScatter, orig_dtype: Optional[torch.dtype], reduce_dtype: Optional[torch.dtype], device: torch.device, gradient_divide_factor: Optional[float], all_reduce_group: Optional[dist.ProcessGroup], # not `None` iff HSDP all_reduce_stream: torch.Stream, all_reduce_grads: bool, partial_reduce_output: Optional[torch.Tensor], # only used for HSDP all_reduce_hook: Optional[Callable[[torch.Tensor], None]], force_sum_reduction_for_comms: bool = False, ): nonlocal reduce_scatter_streams reduce_scatter_streams.add(reduce_scatter_stream) return orig_foreach_reduce( fsdp_params, unsharded_grads, reduce_scatter_group, reduce_scatter_stream, reduce_scatter_comm, orig_dtype, reduce_dtype, device, gradient_divide_factor, all_reduce_group, all_reduce_stream, all_reduce_grads, partial_reduce_output, all_reduce_hook, force_sum_reduction_for_comms, ) with ( patch_foreach_all_gather(foreach_all_gather_with_assert), patch_foreach_reduce(foreach_reduce_with_assert), ): loss = model(inp).sum() self.assertEqual(ref_loss, loss) ref_loss.backward() loss.backward() for param in ref_model.parameters(): dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) self.assertEqual(len(all_gather_streams), 1) self.assertEqual(len(reduce_scatter_streams), 1) check_sharded_parity(self, ref_model, model)
TestFullyShardShareCommContext
python
numba__numba
numba/core/compiler.py
{ "start": 10635, "end": 11742 }
class ____(dict): """ A dictionary that has an overloaded getattr and setattr to permit getting and setting key/values through the use of attributes. """ def __getattr__(self, attr): try: return self[attr] except KeyError: raise AttributeError(attr) def __setattr__(self, attr, value): self[attr] = value def _make_subtarget(targetctx, flags): """ Make a new target context from the given target context and flags. """ subtargetoptions = {} if flags.debuginfo: subtargetoptions['enable_debuginfo'] = True if flags.boundscheck: subtargetoptions['enable_boundscheck'] = True if flags.nrt: subtargetoptions['enable_nrt'] = True if flags.auto_parallel: subtargetoptions['auto_parallel'] = flags.auto_parallel if flags.fastmath: subtargetoptions['fastmath'] = flags.fastmath error_model = callconv.create_error_model(flags.error_model, targetctx) subtargetoptions['error_model'] = error_model return targetctx.subtarget(**subtargetoptions)
StateDict
python
xlwings__xlwings
xlwings/main.py
{ "start": 53292, "end": 89102 }
class ____: """ Returns a Range object that represents a cell or a range of cells. Arguments --------- cell1 : str or tuple or Range Name of the range in the upper-left corner in A1 notation or as index-tuple or as name or as xw.Range object. It can also specify a range using the range operator (a colon), .e.g. 'A1:B2' cell2 : str or tuple or Range, default None Name of the range in the lower-right corner in A1 notation or as index-tuple or as name or as xw.Range object. Examples -------- .. code-block:: python import xlwings as xw sheet1 = xw.Book("MyBook.xlsx").sheets[0] sheet1.range("A1") sheet1.range("A1:C3") sheet1.range((1,1)) sheet1.range((1,1), (3,3)) sheet1.range("NamedRange") # Or using index/slice notation sheet1["A1"] sheet1["A1:C3"] sheet1[0, 0] sheet1[0:4, 0:4] sheet1["NamedRange"] """ def __init__(self, cell1=None, cell2=None, **options): # Arguments impl = options.pop("impl", None) if impl is None: if ( cell2 is not None and isinstance(cell1, Range) and isinstance(cell2, Range) ): if cell1.sheet != cell2.sheet: raise ValueError("Ranges are not on the same sheet") impl = cell1.sheet.range(cell1, cell2).impl elif cell2 is None and isinstance(cell1, str): impl = apps.active.range(cell1).impl elif cell2 is None and isinstance(cell1, tuple): impl = sheets.active.range(cell1, cell2).impl elif ( cell2 is not None and isinstance(cell1, tuple) and isinstance(cell2, tuple) ): impl = sheets.active.range(cell1, cell2).impl else: raise ValueError("Invalid arguments") self._impl = impl # Keyword Arguments self._impl.options = options self._options = options @property def impl(self): return self._impl @property def api(self): """ Returns the native object (``pywin32`` or ``appscript`` obj) of the engine being used. .. versionadded:: 0.9.0 """ return self.impl.api def __eq__(self, other): return ( isinstance(other, Range) and self.sheet == other.sheet and self.row == other.row and self.column == other.column and self.shape == other.shape ) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.sheet, self.row, self.column, self.shape)) def __iter__(self): # Iterator object that returns cell Ranges: (1, 1), (1, 2) etc. for i in range(len(self)): yield self(i + 1) def adjust_indent(self, amount): """ Adjusts the indentation in a Range. Arguments --------- amount : int Number of spaces by which the indent is adjusted. Can be positive or negative. """ self.impl.adjust_indent(amount) def group(self, by=None): """ Group rows or columns. Arguments --------- by : str, optional "columns" or "rows". Figured out automatically if the range is defined as '1:3' or 'A:C', respectively. """ if ":" in self.impl.arg1_input and by is None: by = utils.determine_columns_or_rows(self.impl.arg1_input) elif by is None: raise ValueError( "Either provide a range in the form '1:3' or 'A:C', respectively, or provide by='column' or by='rows' as argument" ) self.impl.group(by) def ungroup(self, by=None): """ Ungroup rows or columns Arguments --------- by : str, optional "columns" or "rows". Figured out automatically if the range is defined as '1:3' or 'A:C', respectively. """ if ":" in self.impl.arg1_input and by is None: by = utils.determine_columns_or_rows(self.impl.arg1_input) elif by is None: raise ValueError( "Either provide a range in the form '1:3' or 'A:C', respectively, or provide by='column' or by='rows' as argument" ) self.impl.ungroup(by) def options(self, convert=None, **options): """ Allows you to set a converter and their options. 
Converters define how Excel Ranges and their values are being converted both during reading and writing operations. If no explicit converter is specified, the base converter is being applied, see :ref:`converters`. Arguments --------- ``convert`` : object, default None A converter, e.g. ``dict``, ``np.array``, ``pd.DataFrame``, ``pd.Series``, defaults to default converter Keyword Arguments ----------------- ndim : int, default None number of dimensions numbers : type, default None type of numbers, e.g. ``int`` dates : type, default None e.g. ``datetime.date`` defaults to ``datetime.datetime`` empty : object, default None transformation of empty cells transpose : Boolean, default False transpose values expand : str, default None One of ``'table'``, ``'down'``, ``'right'`` chunksize : int Use a chunksize, e.g. ``10000`` to prevent timeout or memory issues when reading or writing large amounts of data. Works with all formats, including DataFrames, NumPy arrays, and list of lists. err_to_str : Boolean, default False If ``True``, will include cell errors such as ``#N/A`` as strings. By default, they will be converted to ``None``. .. versionadded:: 0.28.0 => For converter-specific options, see :ref:`converters`. Returns ------- Range object """ options["convert"] = convert return Range(impl=self.impl, **options) @property def sheet(self): """ Returns the Sheet object to which the Range belongs. .. versionadded:: 0.9.0 """ return Sheet(impl=self.impl.sheet) def __len__(self): return len(self.impl) @property def count(self): """ Returns the number of cells. """ return len(self) @property def row(self): """ Returns the number of the first row in the specified range. Read-only. Returns ------- Integer .. versionadded:: 0.3.5 """ return self.impl.row @property def column(self): """ Returns the number of the first column in the in the specified range. Read-only. Returns ------- Integer .. versionadded:: 0.3.5 """ return self.impl.column @property def raw_value(self): """ Gets and sets the values directly as delivered from/accepted by the engine that s being used (``pywin32`` or ``appscript``) without going through any of xlwings' data cleaning/converting. This can be helpful if speed is an issue but naturally will be engine specific, i.e. might remove the cross-platform compatibility. """ return self.impl.raw_value @raw_value.setter def raw_value(self, data): self.impl.raw_value = data def clear_contents(self): """Clears the content of a Range but leaves the formatting.""" return self.impl.clear_contents() def clear_formats(self): """Clears the format of a Range but leaves the content. .. versionadded:: 0.26.2 """ return self.impl.clear_formats() def clear(self): """Clears the content and the formatting of a Range.""" return self.impl.clear() @property def has_array(self): """ ``True`` if the range is part of a legacy CSE Array formula and ``False`` otherwise. """ return self.impl.has_array def end(self, direction): """ Returns a Range object that represents the cell at the end of the region that contains the source range. Equivalent to pressing Ctrl+Up, Ctrl+down, Ctrl+left, or Ctrl+right. Parameters ---------- direction : One of 'up', 'down', 'right', 'left' Examples -------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = xw.sheets[0] >>> sheet1.range('A1:B2').value = 1 >>> sheet1.range('A1').end('down') <Range [Book1]Sheet1!$A$2> >>> sheet1.range('B2').end('right') <Range [Book1]Sheet1!$B$2> .. 
versionadded:: 0.9.0 """ return Range(impl=self.impl.end(direction)) @property def formula(self): """Gets or sets the formula for the given Range.""" return self.impl.formula @formula.setter def formula(self, value): self.impl.formula = value @property def formula2(self): """Gets or sets the formula2 for the given Range.""" return self.impl.formula2 @formula2.setter def formula2(self, value): self.impl.formula2 = value @property def formula_array(self): """ Gets or sets an array formula for the given Range. .. versionadded:: 0.7.1 """ return self.impl.formula_array @formula_array.setter def formula_array(self, value): self.impl.formula_array = value @property def font(self): return Font(impl=self.impl.font) @property def characters(self): return Characters(impl=self.impl.characters) @property def column_width(self): """ Gets or sets the width, in characters, of a Range. One unit of column width is equal to the width of one character in the Normal style. For proportional fonts, the width of the character 0 (zero) is used. If all columns in the Range have the same width, returns the width. If columns in the Range have different widths, returns None. column_width must be in the range: 0 <= column_width <= 255 Note: If the Range is outside the used range of the Worksheet, and columns in the Range have different widths, returns the width of the first column. Returns ------- float .. versionadded:: 0.4.0 """ return self.impl.column_width @column_width.setter def column_width(self, value): self.impl.column_width = value @property def row_height(self): """ Gets or sets the height, in points, of a Range. If all rows in the Range have the same height, returns the height. If rows in the Range have different heights, returns None. row_height must be in the range: 0 <= row_height <= 409.5 Note: If the Range is outside the used range of the Worksheet, and rows in the Range have different heights, returns the height of the first row. Returns ------- float .. versionadded:: 0.4.0 """ return self.impl.row_height @row_height.setter def row_height(self, value): self.impl.row_height = value @property def width(self): """ Returns the width, in points, of a Range. Read-only. Returns ------- float .. versionadded:: 0.4.0 """ return self.impl.width @property def height(self): """ Returns the height, in points, of a Range. Read-only. Returns ------- float .. versionadded:: 0.4.0 """ return self.impl.height @property def left(self): """ Returns the distance, in points, from the left edge of column A to the left edge of the range. Read-only. Returns ------- float .. versionadded:: 0.6.0 """ return self.impl.left @property def top(self): """ Returns the distance, in points, from the top edge of row 1 to the top edge of the range. Read-only. Returns ------- float .. versionadded:: 0.6.0 """ return self.impl.top @property def number_format(self): """ Gets and sets the number_format of a Range. Examples -------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = wb.sheets[0] >>> sheet1.range('A1').number_format 'General' >>> sheet1.range('A1:C3').number_format = '0.00%' >>> sheet1.range('A1:C3').number_format '0.00%' .. versionadded:: 0.2.3 """ return self.impl.number_format @number_format.setter def number_format(self, value): self.impl.number_format = value def get_address( self, row_absolute=True, column_absolute=True, include_sheetname=False, external=False, ): """ Returns the address of the range in the specified format. ``address`` can be used instead if none of the defaults need to be changed. 
Arguments --------- row_absolute : bool, default True Set to True to return the row part of the reference as an absolute reference. column_absolute : bool, default True Set to True to return the column part of the reference as an absolute reference. include_sheetname : bool, default False Set to True to include the Sheet name in the address. Ignored if external=True. external : bool, default False Set to True to return an external reference with workbook and worksheet name. Returns ------- str Examples -------- :: >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = wb.sheets[0] >>> sheet1.range((1,1)).get_address() '$A$1' >>> sheet1.range((1,1)).get_address(False, False) 'A1' >>> sheet1.range((1,1), (3,3)).get_address(True, False, True) 'Sheet1!A$1:C$3' >>> sheet1.range((1,1), (3,3)).get_address(True, False, external=True) '[Book1]Sheet1!A$1:C$3' .. versionadded:: 0.2.3 """ if include_sheetname and not external: # TODO: when the Workbook name contains spaces but not the Worksheet name, # it will still be surrounded # by '' when include_sheetname=True. Also, should probably changed to regex temp_str = self.impl.get_address(row_absolute, column_absolute, True) if temp_str.find("[") > -1: results_address = temp_str[temp_str.rfind("]") + 1 :] if results_address.find("'") > -1: results_address = "'" + results_address return results_address else: return temp_str else: return self.impl.get_address(row_absolute, column_absolute, external) @property def address(self): """ Returns a string value that represents the range reference. Use ``get_address()`` to be able to provide parameters. .. versionadded:: 0.9.0 """ return self.impl.address @property def current_region(self): """ This property returns a Range object representing a range bounded by (but not including) any combination of blank rows and blank columns or the edges of the worksheet. It corresponds to ``Ctrl-*`` on Windows and ``Shift-Ctrl-Space`` on Mac. Returns ------- Range object """ return Range(impl=self.impl.current_region) def autofit(self): """ Autofits the width and height of all cells in the range. * To autofit only the width of the columns use ``myrange.columns.autofit()`` * To autofit only the height of the rows use ``myrange.rows.autofit()`` .. versionchanged:: 0.9.0 """ return self.impl.autofit() @property def color(self): """ Gets and sets the background color of the specified Range. To set the color, either use an RGB tuple ``(0, 0, 0)`` or a hex string like ``#efefef`` or an Excel color constant. To remove the background, set the color to ``None``, see Examples. Returns ------- RGB : tuple Examples -------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = xw.sheets[0] >>> sheet1.range('A1').color = (255, 255, 255) # or '#ffffff' >>> sheet1.range('A2').color (255, 255, 255) >>> sheet1.range('A2').color = None >>> sheet1.range('A2').color is None True .. versionadded:: 0.3.0 """ return self.impl.color @color.setter def color(self, color_or_rgb): self.impl.color = color_or_rgb @property def name(self): """ Sets or gets the name of a Range. .. versionadded:: 0.4.0 """ impl = self.impl.name return impl and Name(impl=impl) @name.setter def name(self, value): self.impl.name = value def __call__(self, *args): return Range(impl=self.impl(*args)) @property def rows(self): """ Returns a :class:`RangeRows` object that represents the rows in the specified range. .. 
versionadded:: 0.9.0 """ return RangeRows(self) @property def columns(self): """ Returns a :class:`RangeColumns` object that represents the columns in the specified range. .. versionadded:: 0.9.0 """ return RangeColumns(self) @property def shape(self): """ Tuple of Range dimensions. .. versionadded:: 0.3.0 """ return self.impl.shape @property def size(self): """ Number of elements in the Range. .. versionadded:: 0.3.0 """ a, b = self.shape return a * b @property def value(self): """ Gets and sets the values for the given Range. See :meth:`xlwings.Range.options` about how to set options, e.g., to transform it into a DataFrame or how to set a chunksize. Returns ------- object : returned object depends on the converter being used, see :meth:`xlwings.Range.options` """ return conversion.read(self, None, self._options) @value.setter def value(self, data): conversion.write(data, self, self._options) def expand(self, mode="table"): """ Expands the range according to the mode provided. Ignores empty top-left cells (unlike ``Range.end()``). Parameters ---------- mode : str, default 'table' One of ``'table'`` (=down and right), ``'down'``, ``'right'``. Returns ------- Range Examples -------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = wb.sheets[0] >>> sheet1.range('A1').value = [[None, 1], [2, 3]] >>> sheet1.range('A1').expand().address $A$1:$B$2 >>> sheet1.range('A1').expand('right').address $A$1:$B$1 .. versionadded:: 0.9.0 """ return expansion.expanders.get(mode, mode).expand(self) def __getitem__(self, key): if type(key) is tuple: row, col = key n = self.shape[0] if isinstance(row, slice): row1, row2, step = row.indices(n) if step != 1: raise ValueError("Slice steps not supported.") row2 -= 1 elif isinstance(row, int): if row < 0: row += n if row < 0 or row >= n: raise IndexError("Row index %s out of range (%s rows)." % (row, n)) row1 = row2 = row else: raise TypeError( "Row indices must be integers or slices, not %s" % type(row).__name__ ) n = self.shape[1] if isinstance(col, slice): col1, col2, step = col.indices(n) if step != 1: raise ValueError("Slice steps not supported.") col2 -= 1 elif isinstance(col, int): if col < 0: col += n if col < 0 or col >= n: raise IndexError( "Column index %s out of range (%s columns)." % (col, n) ) col1 = col2 = col else: raise TypeError( "Column indices must be integers or slices, not %s" % type(col).__name__ ) return self.sheet.range( ( self.row + row1, self.column + col1, max(0, row2 - row1 + 1), max(0, col2 - col1 + 1), ) ) elif isinstance(key, slice): if self.shape[0] > 1 and self.shape[1] > 1: raise IndexError( "One-dimensional slicing is not allowed on two-dimensional ranges" ) if self.shape[0] > 1: return self[key, :] else: return self[:, key] elif isinstance(key, int): n = len(self) k = key + n if key < 0 else key if k < 0 or k >= n: raise IndexError("Index %s out of range (%s elements)." % (key, n)) else: return self(k + 1) else: raise TypeError( "Cell indices must be integers or slices, not %s" % type(key).__name__ ) def __repr__(self): return "<Range [{1}]{0}!{2}>".format( self.sheet.name, self.sheet.book.name, self.address ) def insert(self, shift, copy_origin="format_from_left_or_above"): """ Insert a cell or range of cells into the sheet. Parameters ---------- shift : str Use ``right`` or ``down``. copy_origin : str, default format_from_left_or_above Use ``format_from_left_or_above`` or ``format_from_right_or_below``. Note that copy_origin is only supported on Windows. Returns ------- None .. 
versionchanged:: 0.30.3 ``shift`` is now a required argument. """ self.impl.insert(shift, copy_origin) def delete(self, shift=None): """ Deletes a cell or range of cells. Parameters ---------- shift : str, default None Use ``left`` or ``up``. If omitted, Excel decides based on the shape of the range. Returns ------- None """ self.impl.delete(shift) def copy(self, destination=None): """ Copy a range to a destination range or clipboard. Parameters ---------- destination : xlwings.Range xlwings Range to which the specified range will be copied. If omitted, the range is copied to the clipboard. Returns ------- None """ self.impl.copy(destination) def copy_from( self, source_range, copy_type="all", skip_blanks=False, transpose=False ): """ A newer variant of copy that replaces copy/paste. Parameters ---------- source_range : xlwings.Range copy_type : string, default "all" One of "all", "formats", "formulas", "link", "values" skip_blanks : bool, default False transpose : bool, default False """ self.impl.copy_from(source_range, copy_type, skip_blanks, transpose) def paste(self, paste=None, operation=None, skip_blanks=False, transpose=False): """ Pastes a range from the clipboard into the specified range. Parameters ---------- paste : str, default None One of ``all_merging_conditional_formats``, ``all``, ``all_except_borders``, ``all_using_source_theme``, ``column_widths``, ``comments``, ``formats``, ``formulas``, ``formulas_and_number_formats``, ``validation``, ``values``, ``values_and_number_formats``. operation : str, default None One of "add", "divide", "multiply", "subtract". skip_blanks : bool, default False Set to ``True`` to skip over blank cells transpose : bool, default False Set to ``True`` to transpose rows and columns. Returns ------- None """ self.impl.paste( paste=paste, operation=operation, skip_blanks=skip_blanks, transpose=transpose, ) @property def hyperlink(self): """ Returns the hyperlink address of the specified Range (single Cell only) Examples -------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = wb.sheets[0] >>> sheet1.range('A1').value 'www.xlwings.org' >>> sheet1.range('A1').hyperlink 'http://www.xlwings.org' .. versionadded:: 0.3.0 """ if self.formula.lower().startswith("="): # If it's a formula, extract the URL from the formula string formula = self.formula try: return re.compile(r"\"(.+?)\"").search(formula).group(1) except AttributeError: raise Exception("The cell doesn't seem to contain a hyperlink!") else: # If it has been set pragmatically return self.impl.hyperlink def add_hyperlink(self, address, text_to_display=None, screen_tip=None): """ Adds a hyperlink to the specified Range (single Cell) Arguments --------- address : str The address of the hyperlink. text_to_display : str, default None The text to be displayed for the hyperlink. Defaults to the hyperlink address. screen_tip: str, default None The screen tip to be displayed when the mouse pointer is paused over the hyperlink. Default is set to '<address> - Click once to follow. Click and hold to select this cell.' .. versionadded:: 0.3.0 """ if text_to_display is None: text_to_display = address if address[:4] == "www.": address = "http://" + address if screen_tip is None: screen_tip = ( address + " - Click once to follow. Click and hold to select this cell." 
) self.impl.add_hyperlink(address, text_to_display, screen_tip) def resize(self, row_size=None, column_size=None): """ Resizes the specified Range Arguments --------- row_size: int > 0 The number of rows in the new range (if None, the number of rows in the range is unchanged). column_size: int > 0 The number of columns in the new range (if None, the number of columns in the range is unchanged). Returns ------- Range object: Range .. versionadded:: 0.3.0 """ if row_size is not None: assert row_size > 0 else: row_size = self.shape[0] if column_size is not None: assert column_size > 0 else: column_size = self.shape[1] return Range(self(1, 1), self(row_size, column_size)).options(**self._options) def offset(self, row_offset=0, column_offset=0): """ Returns a Range object that represents a Range that's offset from the specified range. Returns ------- Range object : Range .. versionadded:: 0.3.0 """ return Range( self(row_offset + 1, column_offset + 1), self(row_offset + self.shape[0], column_offset + self.shape[1]), ).options(**self._options) @property def last_cell(self): """ Returns the bottom right cell of the specified range. Read-only. Returns ------- Range Example ------- >>> import xlwings as xw >>> wb = xw.Book() >>> sheet1 = wb.sheets[0] >>> myrange = sheet1.range('A1:E4') >>> myrange.last_cell.row, myrange.last_cell.column (4, 5) .. versionadded:: 0.3.5 """ return self(self.shape[0], self.shape[1]).options(**self._options) def select(self): """ Selects the range. Select only works on the active book. .. versionadded:: 0.9.0 """ self.impl.select() @property def merge_area(self): """ Returns a Range object that represents the merged Range containing the specified cell. If the specified cell isn't in a merged range, this property returns the specified cell. """ return Range(impl=self.impl.merge_area) @property def merge_cells(self): """ Returns ``True`` if the Range contains merged cells, otherwise ``False`` """ return self.impl.merge_cells def merge(self, across=False): """ Creates a merged cell from the specified Range object. Parameters ---------- across : bool, default False True to merge cells in each row of the specified Range as separate merged cells. """ with self.sheet.book.app.properties(display_alerts=False): self.impl.merge(across) def unmerge(self): """ Separates a merged area into individual cells. """ self.impl.unmerge() @property def table(self): """ Returns a Table object if the range is part of one, otherwise ``None``. .. versionadded:: 0.21.0 """ if self.impl.table: return Table(impl=self.impl.table) else: return None @property def wrap_text(self): """ Returns ``True`` if the wrap_text property is enabled and ``False`` if it's disabled. If not all cells have the same value in a range, on Windows it returns ``None`` and on macOS ``False``. .. versionadded:: 0.23.2 """ return self.impl.wrap_text @wrap_text.setter def wrap_text(self, value): self.impl.wrap_text = value @property def note(self): """ Returns a Note object. Before the introduction of threaded comments, a Note was called a Comment. .. versionadded:: 0.24.2 """ return Note(impl=self.impl.note) if self.impl.note else None def copy_picture(self, appearance="screen", format="picture"): """ Copies the range to the clipboard as picture. Parameters ---------- appearance : str, default 'screen' Either 'screen' or 'printer'. format : str, default 'picture' Either 'picture' or 'bitmap'. .. 
versionadded:: 0.24.8 """ self.impl.copy_picture(appearance, format) def to_png(self, path=None): """ Exports the range as PNG picture. Parameters ---------- path : str or path-like, default None Path where you want to store the picture. Defaults to the name of the range in the same directory as the Excel file if the Excel file is stored and to the current working directory otherwise. .. versionadded:: 0.24.8 """ if not PIL: raise XlwingsError("Range.to_png() requires an installation of Pillow.") path = utils.fspath(path) if path is None: # TODO: factor this out as it's used in multiple locations directory, _ = os.path.split(self.sheet.book.fullname) default_name = ( str(self) .replace("<", "") .replace(">", "") .replace(":", "_") .replace(" ", "") ) if directory: path = os.path.join(directory, default_name + ".png") else: path = str(Path.cwd() / default_name) + ".png" self.impl.to_png(path) def to_pdf(self, path=None, layout=None, show=None, quality="standard"): """ Exports the range as PDF. Parameters ---------- path : str or path-like, default None Path where you want to store the pdf. Defaults to the address of the range in the same directory as the Excel file if the Excel file is stored and to the current working directory otherwise. layout : str or path-like object, default None This argument requires xlwings :bdg-secondary:`PRO`. Path to a PDF file on which the report will be printed. This is ideal for headers and footers as well as borderless printing of graphics/artwork. The PDF file either needs to have only 1 page (every report page uses the same layout) or otherwise needs the same amount of pages as the report (each report page is printed on the respective page in the layout PDF). show : bool, default False Once created, open the PDF file with the default application. quality : str, default ``'standard'`` Quality of the PDF file. Can either be ``'standard'`` or ``'minimum'``. .. versionadded:: 0.26.2 """ return utils.to_pdf(self, path=path, layout=layout, show=show, quality=quality) def autofill(self, destination, type_="fill_default"): """ Autofills the destination Range. Note that the destination Range must include the origin Range. Arguments --------- destination : Range The origin. type_ : str, default ``"fill_default"`` One of the following strings: ``"fill_copy"``, ``"fill_days"``, ``"fill_default"``, ``"fill_formats"``, ``"fill_months"``, ``"fill_series"``, ``"fill_values"``, ``"fill_weekdays"``, ``"fill_years"``, ``"growth_trend"``, ``"linear_trend"``, ``"flash_fill`` .. versionadded:: 0.30.1 """ self.impl.autofill(destination=destination, type_=type_) # These have to be after definition of Range to resolve circular reference from . import conversion, expansion
Range
python
anthropics__anthropic-sdk-python
src/anthropic/types/beta/beta_tool_computer_use_20251124_param.py
{ "start": 362, "end": 1438 }
class ____(TypedDict, total=False): display_height_px: Required[int] """The height of the display in pixels.""" display_width_px: Required[int] """The width of the display in pixels.""" name: Required[Literal["computer"]] """Name of the tool. This is how the tool will be called by the model and in `tool_use` blocks. """ type: Required[Literal["computer_20251124"]] allowed_callers: List[Literal["direct", "code_execution_20250825"]] cache_control: Optional[BetaCacheControlEphemeralParam] """Create a cache control breakpoint at this content block.""" defer_loading: bool """If true, tool will not be included in initial system prompt. Only loaded when returned via tool_reference from tool search. """ display_number: Optional[int] """The X11 display number (e.g. 0, 1) for the display.""" enable_zoom: bool """Whether to enable an action to take a zoomed-in screenshot of the screen.""" input_examples: Iterable[Dict[str, object]] strict: bool
BetaToolComputerUse20251124Param
python
tensorflow__tensorflow
tensorflow/python/training/training_util_test.py
{ "start": 1014, "end": 3912 }
class ____(test.TestCase): def _assert_global_step(self, global_step, expected_dtype=dtypes.int64): self.assertEqual('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name) self.assertEqual(expected_dtype, global_step.dtype.base_dtype) self.assertEqual([], global_step.get_shape().as_list()) def test_invalid_dtype(self): with ops.Graph().as_default() as g: self.assertIsNone(training_util.get_global_step()) variable_v1.VariableV1( 0.0, trainable=False, dtype=dtypes.float32, name=ops.GraphKeys.GLOBAL_STEP, collections=[ops.GraphKeys.GLOBAL_STEP]) self.assertRaisesRegex(TypeError, 'does not have integer type', training_util.get_global_step) self.assertRaisesRegex(TypeError, 'does not have integer type', training_util.get_global_step, g) def test_invalid_shape(self): with ops.Graph().as_default() as g: self.assertIsNone(training_util.get_global_step()) variable_v1.VariableV1([0], trainable=False, dtype=dtypes.int32, name=ops.GraphKeys.GLOBAL_STEP, collections=[ops.GraphKeys.GLOBAL_STEP]) self.assertRaisesRegex(TypeError, 'not scalar', training_util.get_global_step) self.assertRaisesRegex(TypeError, 'not scalar', training_util.get_global_step, g) def test_create_global_step(self): self.assertIsNone(training_util.get_global_step()) with ops.Graph().as_default() as g: global_step = training_util.create_global_step() self._assert_global_step(global_step) self.assertRaisesRegex(ValueError, 'already exists', training_util.create_global_step) self.assertRaisesRegex(ValueError, 'already exists', training_util.create_global_step, g) self._assert_global_step(training_util.create_global_step(ops.Graph())) def test_get_global_step(self): with ops.Graph().as_default() as g: self.assertIsNone(training_util.get_global_step()) variable_v1.VariableV1( 0, trainable=False, dtype=dtypes.int32, name=ops.GraphKeys.GLOBAL_STEP, collections=[ops.GraphKeys.GLOBAL_STEP]) self._assert_global_step( training_util.get_global_step(), expected_dtype=dtypes.int32) self._assert_global_step( training_util.get_global_step(g), expected_dtype=dtypes.int32) def test_get_or_create_global_step(self): with ops.Graph().as_default() as g: self.assertIsNone(training_util.get_global_step()) self._assert_global_step(training_util.get_or_create_global_step()) self._assert_global_step(training_util.get_or_create_global_step(g))
GlobalStepTest
python
astropy__astropy
astropy/units/quantity.py
{ "start": 1641, "end": 2173 }
class ____(_config.ConfigNamespace): """ Configuration parameters for Quantity. """ latex_array_threshold = _config.ConfigItem( 100, "The maximum size an array Quantity can be before its LaTeX " 'representation for IPython gets "summarized" (meaning only the first ' 'and last few elements are shown with "..." between). Setting this to a ' "negative number means that the value will instead be whatever numpy " "gets from get_printoptions.", ) conf = Conf()
Conf
python
pallets__jinja
tests/test_ext.py
{ "start": 16845, "end": 18102 }
class ____: def test_basic_scope_behavior(self): # This is what the old with statement compiled down to class ScopeExt(Extension): tags = {"scope"} def parse(self, parser): node = nodes.Scope(lineno=next(parser.stream).lineno) assignments = [] while parser.stream.current.type != "block_end": lineno = parser.stream.current.lineno if assignments: parser.stream.expect("comma") target = parser.parse_assign_target() parser.stream.expect("assign") expr = parser.parse_expression() assignments.append(nodes.Assign(target, expr, lineno=lineno)) node.body = assignments + list( parser.parse_statements(("name:endscope",), drop_needle=True) ) return node env = Environment(extensions=[ScopeExt]) tmpl = env.from_string( """\ {%- scope a=1, b=2, c=b, d=e, e=5 -%} {{ a }}|{{ b }}|{{ c }}|{{ d }}|{{ e }} {%- endscope -%} """ ) assert tmpl.render(b=3, e=4) == "1|2|2|4|5"
TestScope
python
apache__airflow
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/pod.py
{ "start": 63330, "end": 64553 }
class ____(AbstractContextManager): """ Returns context manager that will swallow and log exceptions. By default swallows descendents of Exception, but you can provide other classes through the vararg ``exceptions``. Suppression behavior can be disabled with reraise=True. :meta private: """ def __init__(self, *exceptions, reraise: bool = False) -> None: self._exceptions = exceptions or (Exception,) self.reraise = reraise self.exception = None def __enter__(self): return self def __exit__(self, exctype, excinst, exctb) -> bool: error = exctype is not None matching_error = error and issubclass(exctype, self._exceptions) if (error and not matching_error) or (matching_error and self.reraise): return False if matching_error: self.exception = excinst logger = logging.getLogger(__name__) logger.exception(excinst) return True def _normalize_labels_dict(labels: dict) -> dict: """Return a copy of the labels dict with all None values replaced by empty strings.""" return {k: ("" if v is None else v) for k, v in labels.items()}
_optionally_suppress
python
tensorflow__tensorflow
tensorflow/python/eager/cancellation_test.py
{ "start": 785, "end": 1058 }
class ____(test.TestCase): def testStartCancel(self): manager = cancellation.CancellationManager() self.assertFalse(manager.is_cancelled) manager.start_cancel() self.assertTrue(manager.is_cancelled) if __name__ == '__main__': test.main()
CancellationTest
python
kamyu104__LeetCode-Solutions
Python/spiral-matrix-iii.py
{ "start": 39, "end": 573 }
class ____(object): def spiralMatrixIII(self, R, C, r0, c0): """ :type R: int :type C: int :type r0: int :type c0: int :rtype: List[List[int]] """ r, c = r0, c0 result = [[r, c]] x, y, n, i = 0, 1, 0, 0 while len(result) < R*C: r, c, i = r+x, c+y, i+1 if 0 <= r < R and 0 <= c < C: result.append([r, c]) if i == n//2+1: x, y, n, i = y, -x, n+1, 0 return result
Solution
python
tensorflow__tensorflow
tensorflow/tools/proto_splitter/split.py
{ "start": 1186, "end": 1934 }
class ____(abc.ABC): """An abstract class for splitting and writing protos that are > 2GB. See the README on how to use or subclass this class. """ @property @abc.abstractmethod def version_def(self) -> versions_pb2.VersionDef: """Version info about the splitter and merge implementation required.""" @abc.abstractmethod def split( self, ) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]: """Splits proto message into a Sequence of protos/bytes.""" @abc.abstractmethod def write(self, file_prefix: str) -> str: """Serializes proto to disk. Args: file_prefix: string prefix of the filepath. Returns: The actual path the proto is written to. """
Splitter
python
psf__black
tests/data/cases/preview_long_strings__regression.py
{ "start": 28686, "end": 33224 }
class ____: def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx): xxxxxxxx = [ xxxxxxxxxxxxxxxx( "xxxx", xxxxxxxxxxx={ "xxxx": 1.0, }, xxxxxx={"xxxxxx 1": xxxxxx(xxxx="xxxxxx 1", xxxxxx=600.0)}, xxxxxxxx_xxxxxxx=0.0, ), xxxxxxxxxxxxxxxx( "xxxxxxx", xxxxxxxxxxx={ "xxxx": 1.0, }, xxxxxx={"xxxxxx 1": xxxxxx(xxxx="xxxxxx 1", xxxxxx=200.0)}, xxxxxxxx_xxxxxxx=0.0, ), xxxxxxxxxxxxxxxx( "xxxx", ), ] some_dictionary = { "xxxxx006": [ ( "xxx-xxx" " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" " xxxxx000 xxxxxxxxxx\n" ), ( "xxx-xxx" " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" " xxxxx010 xxxxxxxxxx\n" ), ], "xxxxx016": [ ( "xxx-xxx" " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" " xxxxx000 xxxxxxxxxx\n" ), ( "xxx-xxx" " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" " xxxxx010 xxxxxxxxxx\n" ), ], } def foo(): xxx_xxx = ( # xxxx xxxxxxxxxx xxxx xx xxxx xx xxx xxxxxxxx xxxxxx xxxxx. 'xxxx xxx xxxxxxxx_xxxx xx "xxxxxxxxxx".\n xxx: xxxxxx xxxxxxxx_xxxx=xxxxxxxxxx' ) some_tuple = ("some string", "some string which should be joined") some_commented_string = ( # This comment stays at the top. "This string is long but not so long that it needs hahahah toooooo be so greatttt" " {} that I just can't think of any more good words to say about it at" " allllllllllll".format("ha") # comments here are fine ) some_commented_string = ( "This string is long but not so long that it needs hahahah toooooo be so greatttt" # But these " {} that I just can't think of any more good words to say about it at" # comments will stay " allllllllllll".format("ha") # comments here are fine ) lpar_and_rpar_have_comments = func_call( # LPAR Comment "Long really ridiculous type of string that shouldn't really even exist at all. 
I" " mean commmme onnn!!!", # Comma Comment ) # RPAR Comment cmd_fstring = ( "sudo -E deluge-console info --detailed --sort-reverse=time_added " f"{'' if ID is None else ID} | perl -nE 'print if /^{field}:/'" ) cmd_fstring = ( "sudo -E deluge-console info --detailed --sort-reverse=time_added" f" {'' if ID is None else ID} | perl -nE 'print if /^{field}:/'" ) cmd_fstring = ( "sudo -E deluge-console info --detailed --sort-reverse=time_added" f" {'{{}}' if ID is None else ID} | perl -nE 'print if /^{field}:/'" ) cmd_fstring = ( "sudo -E deluge-console info --detailed --sort-reverse=time_added {'' if ID is" f" None else ID}} | perl -nE 'print if /^{field}:/'" ) fstring = ( "This string really doesn't need to be an {{fstring}}, but this one most" f" certainly, absolutely {does}." ) fstring = f"We have to remember to escape {braces}. Like {{these}}. But not {this}."
A
python
apache__airflow
providers/microsoft/mssql/tests/unit/microsoft/mssql/dialects/test_mssql.py
{ "start": 914, "end": 9114 }
class ____: @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_placeholder(self, create_db_api_hook): assert MsSqlDialect(create_db_api_hook).placeholder == "?" @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_get_column_names(self, create_db_api_hook): assert MsSqlDialect(create_db_api_hook).get_column_names("hollywood.actors") == [ "index", "name", "firstname", "age", ] @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_get_target_fields(self, create_db_api_hook): assert MsSqlDialect(create_db_api_hook).get_target_fields("hollywood.actors") == [ "name", "firstname", "age", ] @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_get_primary_keys(self, create_db_api_hook): assert MsSqlDialect(create_db_api_hook).get_primary_keys("hollywood.actors") == ["index"] @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_generate_replace_sql(self, create_db_api_hook): values = [ {"index": 1, "name": "Stallone", "firstname": "Sylvester", "age": "78"}, {"index": 2, "name": "Statham", "firstname": "Jason", "age": "57"}, {"index": 3, "name": "Li", "firstname": "Jet", "age": "61"}, {"index": 4, "name": "Lundgren", "firstname": "Dolph", "age": "66"}, {"index": 5, "name": "Norris", "firstname": "Chuck", "age": "84"}, ] target_fields = ["index", "name", "firstname", "age"] sql = MsSqlDialect(create_db_api_hook).generate_replace_sql("hollywood.actors", values, target_fields) assert ( sql == """ MERGE INTO hollywood.actors WITH (ROWLOCK) AS target USING (SELECT ? AS [index], ? AS name, ? AS firstname, ? 
AS age) AS source ON target.[index] = source.[index] WHEN MATCHED THEN UPDATE SET target.name = source.name, target.firstname = source.firstname, target.age = source.age WHEN NOT MATCHED THEN INSERT ([index], name, firstname, age) VALUES (source.[index], source.name, source.firstname, source.age); """.strip() ) @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name", "identity": True}, {"name": "firstname", "identity": True}, {"name": "age", "identity": True}, ], # columns [("index",), ("name",), ("firstname",), ("age",)], # primary_keys {"index", "user"}, # reserved_words False, # escape_column_names ), ], indirect=True, ) def test_generate_replace_sql_when_all_columns_are_part_of_primary_key(self, create_db_api_hook): values = [ {"index": 1, "name": "Stallone", "firstname": "Sylvester", "age": "78"}, {"index": 2, "name": "Statham", "firstname": "Jason", "age": "57"}, {"index": 3, "name": "Li", "firstname": "Jet", "age": "61"}, {"index": 4, "name": "Lundgren", "firstname": "Dolph", "age": "66"}, {"index": 5, "name": "Norris", "firstname": "Chuck", "age": "84"}, ] target_fields = ["index", "name", "firstname", "age"] sql = MsSqlDialect(create_db_api_hook).generate_replace_sql("hollywood.actors", values, target_fields) assert ( sql == """ MERGE INTO hollywood.actors WITH (ROWLOCK) AS target USING (SELECT ? AS [index], ? AS name, ? AS firstname, ? AS age) AS source ON target.[index] = source.[index] AND target.name = source.name AND target.firstname = source.firstname AND target.age = source.age WHEN NOT MATCHED THEN INSERT ([index], name, firstname, age) VALUES (source.[index], source.name, source.firstname, source.age); """.strip() ) @pytest.mark.parametrize( "create_db_api_hook", [ ( [ {"name": "index", "identity": True}, {"name": "name"}, {"name": "firstname"}, {"name": "age"}, ], # columns [("index",)], # primary_keys {"index", "user"}, # reserved_words True, # escape_column_names ), ], indirect=True, ) def test_generate_replace_sql_when_escape_column_names_is_enabled(self, create_db_api_hook): values = [ {"index": 1, "name": "Stallone", "firstname": "Sylvester", "age": "78"}, {"index": 2, "name": "Statham", "firstname": "Jason", "age": "57"}, {"index": 3, "name": "Li", "firstname": "Jet", "age": "61"}, {"index": 4, "name": "Lundgren", "firstname": "Dolph", "age": "66"}, {"index": 5, "name": "Norris", "firstname": "Chuck", "age": "84"}, ] target_fields = ["index", "name", "firstname", "age"] sql = MsSqlDialect(create_db_api_hook).generate_replace_sql("hollywood.actors", values, target_fields) assert ( sql == """ MERGE INTO hollywood.actors WITH (ROWLOCK) AS target USING (SELECT ? AS [index], ? AS [name], ? AS [firstname], ? AS [age]) AS source ON target.[index] = source.[index] WHEN MATCHED THEN UPDATE SET target.[name] = source.[name], target.[firstname] = source.[firstname], target.[age] = source.[age] WHEN NOT MATCHED THEN INSERT ([index], [name], [firstname], [age]) VALUES (source.[index], source.[name], source.[firstname], source.[age]); """.strip() )
TestMsSqlDialect
python
pytorch__pytorch
torch/distributed/distributed_c10d.py
{ "start": 18566, "end": 19863 }
class ____: """ A class to capture collective operations. Args: op (Callable): A collective function, e.g. ``torch.distributed.all_reduce``. tensor (Tensor): Tensor to operate on. dst_tensor (Tensor, optional): Provided when source and destination tensors are not the same. redop (ReduceOp, optional): reduce operation. root (int, optional): root of broadcast or reduce. """ def __init__( self, op: Callable, tensor: torch.Tensor, dst_tensor: torch.Tensor | None = None, redop: ReduceOp | None = None, root: int | None = None, ): self.op = op self.tensor = tensor self.dst_tensor = dst_tensor self.redop = redop self.root = root # DO NOT USE THESE FIELDS DIRECTLY. # Use them through the _world object to make sure the _world override mechanism _pg_map: dict[ProcessGroup, tuple[str, Store]] = {} _pg_names: dict[ProcessGroup, str] = {} _pg_group_ranks: dict[ProcessGroup, dict[int, int]] = {} # For a pg, it is a map from ProcessGroup to BackendConfig _pg_backend_config: dict[ProcessGroup, str] = {} _group_count = 0 _tags_to_pg: dict[str, list[ProcessGroup]] = {} _pg_to_tag: dict[ProcessGroup, str] = {} _backend: str | None = None
_CollOp
python
scipy__scipy
scipy/interpolate/tests/test_interpolate.py
{ "start": 38822, "end": 47061 }
class ____: def test_eval(self, xp): x = xp.arange(0., 11., dtype=xp.float64) y = xp.asarray( [0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.], dtype=xp.float64 ) ak = Akima1DInterpolator(x, y) xi = xp.asarray([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, 8.6, 9.9, 10.], dtype=xp.float64) yi = xp.asarray([0., 1.375, 2., 1.5, 1.953125, 2.484375, 4.1363636363636366866103344, 5.9803623910336236590978842, 5.5067291516462386624652936, 5.2031367459745245795943447, 4.1796554159017080820603951, 3.4110386597938129327189927, 3.], dtype=xp.float64) xp_assert_close(ak(xi), yi) def test_eval_mod(self, xp): # Reference values generated with the following MATLAB code: # format longG # x = 0:10; y = [0. 2. 1. 3. 2. 6. 5.5 5.5 2.7 5.1 3.]; # xi = [0. 0.5 1. 1.5 2.5 3.5 4.5 5.1 6.5 7.2 8.6 9.9 10.]; # makima(x, y, xi) x = xp.arange(0., 11., dtype=xp.float64) y = xp.asarray( [0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.], dtype=xp.float64 ) ak = Akima1DInterpolator(x, y, method="makima") xi = xp.asarray([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, 8.6, 9.9, 10.], dtype=xp.float64) yi = xp.asarray([ 0.0, 1.34471153846154, 2.0, 1.44375, 1.94375, 2.51939102564103, 4.10366931918656, 5.98501550899192, 5.51756330960439, 5.1757231914014, 4.12326636931311, 3.32931513157895, 3.0], dtype=xp.float64) xp_assert_close(ak(xi), yi) def test_eval_2d(self, xp): x = xp.arange(0., 11., dtype=xp.float64) y = xp.asarray( [0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.], dtype=xp.float64 ) y = xp.stack((y, 2. * y), axis=1) ak = Akima1DInterpolator(x, y) xi = xp.asarray([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, 8.6, 9.9, 10.], dtype=xp.float64) yi = xp.asarray([0., 1.375, 2., 1.5, 1.953125, 2.484375, 4.1363636363636366866103344, 5.9803623910336236590978842, 5.5067291516462386624652936, 5.2031367459745245795943447, 4.1796554159017080820603951, 3.4110386597938129327189927, 3.], dtype=xp.float64) yi = xp.stack((yi, 2. * yi), axis=1) xp_assert_close(ak(xi), yi) def test_eval_3d(self): x = np.arange(0., 11.) y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) y = np.empty((11, 2, 2)) y[:, 0, 0] = y_ y[:, 1, 0] = 2. * y_ y[:, 0, 1] = 3. * y_ y[:, 1, 1] = 4. * y_ ak = Akima1DInterpolator(x, y) xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2, 8.6, 9.9, 10.]) yi = np.empty((13, 2, 2)) yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375, 4.1363636363636366866103344, 5.9803623910336236590978842, 5.5067291516462386624652936, 5.2031367459745245795943447, 4.1796554159017080820603951, 3.4110386597938129327189927, 3.]) yi[:, 0, 0] = yi_ yi[:, 1, 0] = 2. * yi_ yi[:, 0, 1] = 3. * yi_ yi[:, 1, 1] = 4. * yi_ xp_assert_close(ak(xi), yi) def test_linear_interpolant_edge_case_1d(self, xp): x = xp.asarray([0.0, 1.0], dtype=xp.float64) y = xp.asarray([0.5, 1.0]) akima = Akima1DInterpolator(x, y, axis=0, extrapolate=None) xp_assert_close(akima(0.45), xp.asarray(0.725, dtype=xp.float64)) def test_linear_interpolant_edge_case_2d(self, xp): x = xp.asarray([0., 1.]) y = xp.stack((x, 2. * x, 3. * x, 4. * x), axis=1) ak = Akima1DInterpolator(x, y) xi = xp.asarray([0.5, 1.]) yi = xp.asarray([[0.5, 1., 1.5, 2.], [1., 2., 3., 4.]], dtype=xp.float64 ) xp_assert_close(ak(xi), yi) ak = Akima1DInterpolator(x, y.T, axis=1) xp_assert_close(ak(xi), yi.T) def test_linear_interpolant_edge_case_3d(self): x = np.arange(0., 2.) y_ = np.array([0., 1.]) y = np.empty((2, 2, 2)) y[:, 0, 0] = y_ y[:, 1, 0] = 2. * y_ y[:, 0, 1] = 3. * y_ y[:, 1, 1] = 4. 
* y_ ak = Akima1DInterpolator(x, y) yi_ = np.array([0.5, 1.]) yi = np.empty((2, 2, 2)) yi[:, 0, 0] = yi_ yi[:, 1, 0] = 2. * yi_ yi[:, 0, 1] = 3. * yi_ yi[:, 1, 1] = 4. * yi_ xi = yi_ xp_assert_close(ak(xi), yi) ak = Akima1DInterpolator(x, y.transpose(1, 0, 2), axis=1) xp_assert_close(ak(xi), yi.transpose(1, 0, 2)) ak = Akima1DInterpolator(x, y.transpose(2, 1, 0), axis=2) xp_assert_close(ak(xi), yi.transpose(2, 1, 0)) def test_degenerate_case_multidimensional(self, xp): # This test is for issue #5683. x = xp.asarray([0, 1, 2], dtype=xp.float64) y = xp.stack((x, x**2)).T ak = Akima1DInterpolator(x, y) x_eval = xp.asarray([0.5, 1.5], dtype=xp.float64) y_eval = ak(x_eval) xp_assert_close(y_eval, xp.stack((x_eval, x_eval**2)).T) def test_extend(self): x = np.arange(0., 11.) y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) ak = Akima1DInterpolator(x, y) match = "Extending a 1-D Akima interpolator is not yet implemented" with pytest.raises(NotImplementedError, match=match): ak.extend(None, None) def test_mod_invalid_method(self): x = np.arange(0., 11.) y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) match = "`method`=invalid is unsupported." with pytest.raises(NotImplementedError, match=match): Akima1DInterpolator(x, y, method="invalid") # type: ignore def test_extrapolate_attr(self): # x = np.linspace(-5, 5, 11) y = x**2 x_ext = np.linspace(-10, 10, 17) y_ext = x_ext**2 # Testing all extrapolate cases. ak_true = Akima1DInterpolator(x, y, extrapolate=True) ak_false = Akima1DInterpolator(x, y, extrapolate=False) ak_none = Akima1DInterpolator(x, y, extrapolate=None) # None should default to False; extrapolated points are NaN. xp_assert_close(ak_false(x_ext), ak_none(x_ext), atol=1e-15) xp_assert_equal(ak_false(x_ext)[0:4], np.full(4, np.nan)) xp_assert_equal(ak_false(x_ext)[-4:-1], np.full(3, np.nan)) # Extrapolation on call and attribute should be equal. xp_assert_close(ak_false(x_ext, extrapolate=True), ak_true(x_ext), atol=1e-15) # Testing extrapoation to actual function. xp_assert_close(y_ext, ak_true(x_ext), atol=1e-15) def test_no_overflow(self): # check a large jump does not cause a float overflow x = np.arange(1, 10) y = 1.e6*np.sqrt(np.finfo(float).max)*np.heaviside(x-4, 0.5) ak1 = Akima1DInterpolator(x, y, method='makima') ak2 = Akima1DInterpolator(x, y, method='akima') y_eval1 = ak1(x) y_eval2 = ak2(x) assert np.isfinite(y_eval1).all() assert np.isfinite(y_eval2).all() @pytest.mark.parametrize("method", [Akima1DInterpolator, PchipInterpolator]) def test_complex(method): # Complex-valued data deprecated x = np.arange(0., 11.) y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.]) y = y - 2j*y msg = "real values" with pytest.raises(ValueError, match=msg): method(x, y) def test_concurrency(self): # Check that no segfaults appear with concurrent access to Akima1D x = np.linspace(-5, 5, 11) y = x**2 x_ext = np.linspace(-10, 10, 17) ak = Akima1DInterpolator(x, y, extrapolate=True) def worker_fn(_, ak, x_ext): ak(x_ext) _run_concurrent_barrier(10, worker_fn, ak, x_ext) @make_xp_test_case(PPoly, BPoly)
TestAkima1DInterpolator
python
scipy__scipy
benchmarks/benchmarks/interpolate.py
{ "start": 4726, "end": 5469 }
class ____(Benchmark): param_names = ['n_samples', 'function'] params = [ [10, 50, 100], ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate'] ] def setup(self, n_samples, function): self.x = np.arange(n_samples) self.y = np.sin(self.x) r_samples = n_samples / 2. self.X = np.arange(-r_samples, r_samples, 0.25) self.Y = np.arange(-r_samples, r_samples, 0.25) self.z = np.exp(-self.X**2-self.Y**2) def time_rbf_1d(self, n_samples, function): interpolate.Rbf(self.x, self.y, function=function) def time_rbf_2d(self, n_samples, function): interpolate.Rbf(self.X, self.Y, self.z, function=function)
Rbf
python
dask__distributed
distributed/diagnostics/progressbar.py
{ "start": 7182, "end": 9973 }
class ____: def __init__( self, keys, scheduler=None, *, func=None, group_by="prefix", interval="100ms", complete=False, **kwargs, ): self.scheduler = get_scheduler(scheduler) self.client = None for key in keys: if hasattr(key, "client"): self.client = weakref.ref(key.client) break if func is not None: warnings.warn( "`func` is deprecated, use `group_by` instead", category=DeprecationWarning, ) group_by = func elif group_by in (None, "prefix"): group_by = key_split self.keys = {k.key if hasattr(k, "key") else k for k in keys} self.group_by = group_by self.interval = interval self.complete = complete self._start_time = default_timer() @property def elapsed(self): return default_timer() - self._start_time async def listen(self): complete = self.complete keys = self.keys group_by = self.group_by async def setup(scheduler): p = MultiProgress( keys, scheduler, complete=complete, group_by=group_by, ) await p.setup() return p def function(scheduler, p): result = { "all": valmap(len, p.all_keys), "remaining": valmap(len, p.keys), "status": p.status, } if p.status == "error": result.update(p.extra) return result self.comm = await connect( self.scheduler, **(self.client().connection_args if self.client else {}) ) logger.debug("Progressbar Connected to scheduler") await self.comm.write( { "op": "feed", "setup": dumps(setup), "function": dumps(function), "interval": self.interval, } ) while True: response = await self.comm.read( deserializers=self.client()._deserializers if self.client else None ) self._last_response = response self.status = response["status"] self._draw_bar(**response) if response["status"] in ("error", "finished"): await self.comm.close() self._draw_stop(**response) break logger.debug("Progressbar disconnected from scheduler") def _draw_stop(self, **kwargs): pass def __del__(self): with suppress(AttributeError): self.comm.abort()
MultiProgressBar
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataproc.py
{ "start": 56374, "end": 57386 }
class ____(_DataprocStartStopClusterBaseOperator): """Stop a cluster in a project.""" def execute(self, context: Context) -> dict | None: self.log.info("Stopping the cluster: %s", self.cluster_name) cluster = super().execute(context) self.log.info("Cluster stopped") return cluster def _check_desired_cluster_state(self, cluster: Cluster) -> tuple[bool, str | None]: if cluster.status.state in [cluster.status.State.STOPPED, cluster.status.State.STOPPING]: return True, f'The cluster "{self.cluster_name}" already stopped!' return False, None def _get_operation(self) -> operation.Operation: return self.hook.stop_cluster( region=self.region, project_id=self._get_project_id(), cluster_name=self.cluster_name, cluster_uuid=self.cluster_uuid, retry=self.retry, timeout=self.timeout, metadata=self.metadata, )
DataprocStopClusterOperator
python
walkccc__LeetCode
solutions/1852. Distinct Numbers in Each Subarray/1852.py
{ "start": 0, "end": 428 }
class ____: def distinctNumbers(self, nums: list[int], k: int) -> list[int]: ans = [] count = collections.Counter() distinct = 0 for i, num in enumerate(nums): count[num] += 1 if count[num] == 1: distinct += 1 if i >= k: count[nums[i - k]] -= 1 if count[nums[i - k]] == 0: distinct -= 1 if i >= k - 1: ans.append(distinct) return ans
Solution
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/log_write_lines.py
{ "start": 457, "end": 1146 }
class ____(App): def compose(self) -> ComposeResult: with Horizontal(): yield Log(id="log1", auto_scroll=False) yield Log(id="log2", auto_scroll=True) yield Log(id="log3") yield Log(id="log4", max_lines=6) def on_ready(self) -> None: self.query_one("#log1", Log).write_line(TEXT) self.query_one("#log2", Log).write_line(TEXT) self.query_one("#log3", Log).write_line(TEXT) self.query_one("#log4", Log).write_line(TEXT) self.query_one("#log3", Log).clear() self.query_one("#log3", Log).write_line("Hello, World") if __name__ == "__main__": app = LogApp() app.run()
LogApp
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/solverHigherOrder2.py
{ "start": 297, "end": 526 }
class ____(Generic[T]): def fun(self, x: Iterable[T], f: Callable[[T], T]): ... def caller(self, x: Iterable[T]): self.fun(x, identity) # This should generate an error. self.fun(x, not_identity)
Test
python
apache__airflow
task-sdk/src/airflow/sdk/bases/sensor.py
{ "start": 1448, "end": 2119 }
class ____: """ Optional return value for poke methods. Sensors can optionally return an instance of the PokeReturnValue class in the poke method. If an XCom value is supplied when the sensor is done, then the XCom value will be pushed through the operator return value. :param is_done: Set to true to indicate the sensor can stop poking. :param xcom_value: An optional XCOM value to be returned by the operator. """ def __init__(self, is_done: bool, xcom_value: Any | None = None) -> None: self.xcom_value = xcom_value self.is_done = is_done def __bool__(self) -> bool: return self.is_done
PokeReturnValue
python
ray-project__ray
python/ray/tune/search/repeater.py
{ "start": 909, "end": 2468 }
class ____: """Internal class for grouping trials of same parameters. This is used when repeating trials for reducing training variance. Args: primary_trial_id: Trial ID of the "primary trial". This trial is the one that the Searcher is aware of. config: Suggested configuration shared across all trials in the trial group. max_trials: Max number of trials to execute within this group. """ def __init__(self, primary_trial_id: str, config: Dict, max_trials: int = 1): assert type(config) is dict, "config is not a dict, got {}".format(config) self.primary_trial_id = primary_trial_id self.config = config self._trials = {primary_trial_id: None} self.max_trials = max_trials def add(self, trial_id: str): assert len(self._trials) < self.max_trials self._trials.setdefault(trial_id, None) def full(self) -> bool: return len(self._trials) == self.max_trials def report(self, trial_id: str, score: float): assert trial_id in self._trials if score is None: raise ValueError("Internal Error: Score cannot be None.") self._trials[trial_id] = score def finished_reporting(self) -> bool: return ( None not in self._trials.values() and len(self._trials) == self.max_trials ) def scores(self) -> List[Optional[float]]: return list(self._trials.values()) def count(self) -> int: return len(self._trials) @PublicAPI
_TrialGroup
python
getsentry__sentry
tests/sentry/uptime/endpoints/test_organization_uptime_stats.py
{ "start": 340, "end": 6200 }
class ____(APITestCase): __test__ = False endpoint = "sentry-api-0-organization-uptime-stats" features: dict[str, bool] = {} def setUp(self) -> None: super().setUp() self.login_as(user=self.user) self.subscription_id = uuid.uuid4().hex self.subscription = self.create_uptime_subscription( url="https://santry.io", subscription_id=self.subscription_id ) self.detector = self.create_uptime_detector(uptime_subscription=self.subscription) scenarios: list[dict] = [ {"check_status": "success"}, {"check_status": "failure"}, {"check_status": "success"}, {"check_status": "failure"}, {"check_status": "success"}, {"check_status": "failure"}, {"check_status": "failure"}, {"check_status": "failure", "incident_status": IncidentStatus.IN_INCIDENT}, ] for scenario in scenarios: self.store_uptime_data(self.subscription_id, **scenario) def store_uptime_data( self, subscription_id, check_status, incident_status=IncidentStatus.NO_INCIDENT, scheduled_check_time=None, ): """Store a single uptime data row. Must be implemented by subclasses.""" raise NotImplementedError("Subclasses must implement store_uptime_data") def test_simple(self) -> None: """Test that the endpoint returns data for a simple uptime check using detector IDs.""" with self.feature(self.features): response = self.get_success_response( self.organization.slug, project=[self.project.id], uptimeDetectorId=[str(self.detector.id)], since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1d", ) assert response.data is not None data = json.loads(json.dumps(response.data)) assert len(data[str(self.detector.id)]) == 7 assert data[str(self.detector.id)][-1][1] == { "failure": 4, "failure_incident": 1, "success": 3, "missed_window": 0, } assert data[str(self.detector.id)][0][1] == { "failure": 0, "failure_incident": 0, "success": 0, "missed_window": 0, } def test_invalid_detector_id(self) -> None: """ Test that an invalid detector ID produces a 400 response. 
""" with self.feature(self.features): response = self.get_response( self.organization.slug, project=[self.project.id], uptimeDetectorId=["999999"], since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1d", ) assert response.status_code == 400 assert response.json() == "Invalid uptime detector ids provided" def test_no_detector_id(self) -> None: """Test that not sending any detector ID produces a 400 response.""" with self.feature(self.features): response = self.get_response( self.organization.slug, project=[self.project.id], uptimeDetectorId=[], since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1d", ) assert response.status_code == 400 assert response.json() == "Uptime detector ids must be provided" def test_too_many_periods(self) -> None: """Test that requesting a high resolution across a large period of time produces a 400 response.""" with self.feature(self.features): response = self.get_response( self.organization.slug, project=[self.project.id], uptimeDetectorId=[str(self.detector.id)], since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1m", ) assert response.status_code == 400 assert response.json() == "error making request" def test_too_many_detector_ids_limit(self) -> None: """Test that sending a large number of detector IDs produces a 400.""" with self.feature(self.features): response = self.get_response( self.organization.slug, project=[self.project.id], uptimeDetectorId=[str(i) for i in range(101)], since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1h", ) assert response.status_code == 400 assert response.json() == "Too many uptime detector ids provided. Maximum is 100" def test_no_ids_provided_error(self) -> None: """Test that providing no IDs produces an error.""" with self.feature(self.features): response = self.get_response( self.organization.slug, project=[self.project.id], since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), until=datetime.now(timezone.utc).timestamp(), resolution="1d", ) assert response.status_code == 400 assert response.json() == "Uptime detector ids must be provided" @freeze_time(MOCK_DATETIME)
OrganizationUptimeStatsBaseTest
python
realpython__materials
python-guitar-synthesizer/source_code_final/src/digitar/track.py
{ "start": 62, "end": 1050 }
class ____: def __init__(self, sampling_rate: Hertz) -> None: self.sampling_rate = int(sampling_rate) self.samples = np.array([], dtype=np.float64) def __len__(self) -> int: return self.samples.size @property def duration(self) -> Time: return Time(seconds=len(self) / self.sampling_rate) def add(self, samples: np.ndarray) -> None: self.samples = np.append(self.samples, samples) def add_at(self, instant: Time, samples: np.ndarray) -> None: samples_offset = round(instant.seconds * self.sampling_rate) if samples_offset == len(self): self.add(samples) elif samples_offset > len(self): self.add(np.zeros(samples_offset - len(self))) self.add(samples) else: end = samples_offset + len(samples) if end > len(self): self.add(np.zeros(end - len(self))) self.samples[samples_offset:end] += samples
AudioTrack
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 205196, "end": 206071 }
class ____(Operation): def __init__(self, k=0, *, name=None): super().__init__(name=name) self.k = k def call(self, x): return backend.numpy.tril(x, k=self.k) def compute_output_spec(self, x): return KerasTensor(x.shape, dtype=x.dtype) @keras_export(["keras.ops.tril", "keras.ops.numpy.tril"]) def tril(x, k=0): """Return lower triangle of a tensor. For tensors with `ndim` exceeding 2, `tril` will apply to the final two axes. Args: x: Input tensor. k: Diagonal above which to zero elements. Defaults to `0`. the main diagonal. `k < 0` is below it, and `k > 0` is above it. Returns: Lower triangle of `x`, of same shape and data type as `x`. """ if any_symbolic_tensors((x,)): return Tril(k=k).symbolic_call(x) return backend.numpy.tril(x, k=k)
Tril
python
tiangolo__fastapi
docs_src/cookie_param_models/tutorial002_pv1_py310.py
{ "start": 86, "end": 347 }
class ____(BaseModel): class Config: extra = "forbid" session_id: str fatebook_tracker: str | None = None googall_tracker: str | None = None @app.get("/items/") async def read_items(cookies: Cookies = Cookie()): return cookies
Cookies
python
python-poetry__poetry
src/poetry/console/commands/debug/info.py
{ "start": 128, "end": 993 }
class ____(Command): name = "debug info" description = "Shows debug information." def handle(self) -> int: poetry_python_version = ".".join(str(s) for s in sys.version_info[:3]) self.line("") self.line("<b>Poetry</b>") self.line( "\n".join( [ f"<info>Version</info>: <comment>{self.poetry.VERSION}</>", f"<info>Python</info>: <comment>{poetry_python_version}</>", f"<info>Path</info>: <comment>{Path(sys.prefix)}</>", f"<info>Executable</info>: <comment>{Path(sys.executable) if sys.executable else 'Unknown'}</>", ] ) ) command = self.get_application().get("env info") exit_code: int = command.run(self.io) return exit_code
DebugInfoCommand
python
FactoryBoy__factory_boy
tests/test_django.py
{ "start": 3299, "end": 3595 }
class ____(factory.django.DjangoModelFactory): class Meta: model = models.MultifieldUniqueModel django_get_or_create = ("slug", "text",) slug = factory.Sequence(lambda n: "slug%s" % n) text = factory.Sequence(lambda n: "text%s" % n)
WithMultipleGetOrCreateFieldsFactory
python
simplejson__simplejson
simplejson/tests/test_errors.py
{ "start": 120, "end": 2081 }
class ____(TestCase): def test_string_keys_error(self): data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}] try: json.dumps(data) except TypeError: err = sys.exc_info()[1] else: self.fail('Expected TypeError') self.assertEqual(str(err), 'keys must be str, int, float, bool or None, not tuple') def test_not_serializable(self): try: json.dumps(json) except TypeError: err = sys.exc_info()[1] else: self.fail('Expected TypeError') self.assertEqual(str(err), 'Object of type module is not JSON serializable') def test_decode_error(self): err = None try: json.loads('{}\na\nb') except json.JSONDecodeError: err = sys.exc_info()[1] else: self.fail('Expected JSONDecodeError') self.assertEqual(err.lineno, 2) self.assertEqual(err.colno, 1) self.assertEqual(err.endlineno, 3) self.assertEqual(err.endcolno, 2) def test_scan_error(self): err = None for t in (text_type, b): try: json.loads(t('{"asdf": "')) except json.JSONDecodeError: err = sys.exc_info()[1] else: self.fail('Expected JSONDecodeError') self.assertEqual(err.lineno, 1) self.assertEqual(err.colno, 10) def test_error_is_pickable(self): err = None try: json.loads('{}\na\nb') except json.JSONDecodeError: err = sys.exc_info()[1] else: self.fail('Expected JSONDecodeError') s = pickle.dumps(err) e = pickle.loads(s) self.assertEqual(err.msg, e.msg) self.assertEqual(err.doc, e.doc) self.assertEqual(err.pos, e.pos) self.assertEqual(err.end, e.end)
TestErrors
python
conda__conda
conda/plugins/types.py
{ "start": 9023, "end": 9482 }
class ____(CondaPlugin): """ Return type to use when defining a conda pre-solve plugin hook. For details on how this is used, see :meth:`~conda.plugins.hookspec.CondaSpecs.conda_pre_solves`. :param name: Pre-solve name (e.g., ``custom_plugin_pre_solve``). :param action: Callable which contains the code to be run. """ name: str action: Callable[[frozenset[MatchSpec], frozenset[MatchSpec]], None] @dataclass
CondaPreSolve
python
ray-project__ray
python/ray/data/_internal/execution/resource_manager.py
{ "start": 17508, "end": 25165 }
class ____(ABC): """An interface for dynamic operator resource allocation. This interface allows dynamically allocating available resources to each operator, limiting how many tasks each operator can submit, and how much data each operator can read from its running tasks. """ class IdleDetector: """Utility class for detecting idle operators. Note, stalling can happen when there are less resources than Data executor expects. E.g., when some resources are preempted by non-Data code, see `test_no_deadlock_on_resource_contention` as an example. This class is used to detect potential stalling and allow the execution to make progress. """ # The interval to detect idle operators. # When downstream is idle, we'll allow reading at least one task output # per this interval, DETECTION_INTERVAL_S = 10.0 # Print a warning if an operator is idle for this time. WARN_ON_IDLE_TIME_S = 60.0 # Whether a warning has been printed. _warn_printed = False def __init__(self): # per-op fields self.last_num_outputs = defaultdict(int) self.last_output_time = defaultdict(lambda: time.time()) self.last_detection_time = defaultdict(lambda: time.time()) def detect_idle(self, op: PhysicalOperator): cur_time = time.time() if cur_time - self.last_detection_time[op] > self.DETECTION_INTERVAL_S: cur_num_outputs = op.metrics.num_task_outputs_generated if cur_num_outputs > self.last_num_outputs[op]: self.last_num_outputs[op] = cur_num_outputs self.last_output_time[op] = cur_time self.last_detection_time[op] = cur_time else: self.last_detection_time[op] = cur_time self.print_warning_if_idle_for_too_long( op, cur_time - self.last_output_time[op] ) return True return False @classmethod def print_warning_if_idle_for_too_long( cls, op: PhysicalOperator, idle_time: float ): """Print a warning if an operator is idle for too long.""" if idle_time < cls.WARN_ON_IDLE_TIME_S or cls._warn_printed: return cls._warn_printed = True msg = ( f"Operator {op} is running but has no outputs for {idle_time} seconds." " Execution may be slower than expected.\n" "Ignore this warning if your UDF is expected to be slow." " Otherwise, this can happen when there are fewer cluster resources" " available to Ray Data than expected." " If you have non-Data tasks or actors running in the cluster, exclude" " their resources from Ray Data with" " `DataContext.get_current().execution_options.exclude_resources`." " This message will only print once." ) logger.warning(msg) def __init__(self, topology: "Topology"): self._topology = topology self._idle_detector = self.IdleDetector() @abstractmethod def update_budgets( self, *, limits: ExecutionResources, ): """Callback to update resource usages.""" ... @abstractmethod def can_submit_new_task(self, op: PhysicalOperator) -> bool: """Return whether the given operator can submit a new task.""" ... @abstractmethod def max_task_output_bytes_to_read( self, op: PhysicalOperator, *, task_resource_usage: Dict[PhysicalOperator, ExecutionResources], output_object_store_usage: Dict[PhysicalOperator, int], ) -> Optional[int]: """Return the maximum bytes of pending task outputs can be read for the given operator. None means no limit.""" ... @abstractmethod def get_budget(self, op: PhysicalOperator) -> Optional[ExecutionResources]: """Returns the budget for the given operator or `None` if the operator has unlimited budget. Operator's budget is defined as: Budget = Allocation - Usage """ ... 
@abstractmethod def get_output_budget(self, op: PhysicalOperator) -> Optional[int]: """Returns the budget for operator's outputs (in object store bytes) or `None` if there's no limit. """ ... @abstractmethod def get_allocation(self, op: PhysicalOperator) -> Optional[ExecutionResources]: """Returns allocation for the given operator or `None` if operator's allocation is unlimited.""" ... def _get_eligible_ops(self) -> List[PhysicalOperator]: first_pending_shuffle_op_idx = _get_first_pending_shuffle_op(self._topology) return [ op for idx, op in enumerate(self._topology) if self._is_op_eligible(op) and ( first_pending_shuffle_op_idx == -1 or idx <= first_pending_shuffle_op_idx ) ] @staticmethod def _is_op_eligible(op: PhysicalOperator) -> bool: """Whether the op is eligible for memory reservation.""" return ( not op.throttling_disabled() # As long as the op has finished execution, even if there are still # non-taken outputs, we don't need to allocate resources for it. and not op.has_execution_finished() ) def _get_downstream_eligible_ops( self, op: PhysicalOperator ) -> Iterable[PhysicalOperator]: """Get the downstream eligible operators of the given operator, ignoring intermediate ineligible operators. E.g., - "cur_map->downstream_map" will return [downstream_map]. - "cur_map->limit1->limit2->downstream_map" will return [downstream_map]. """ for next_op in op.output_dependencies: if self._is_op_eligible(next_op): yield next_op else: yield from self._get_downstream_eligible_ops(next_op) def _should_unblock_streaming_output_backpressure( self, op: PhysicalOperator ) -> bool: # NOTE: If this operator is a terminal one, extracting outputs from it # should not be throttled if not op.output_dependencies: return True # In some edge cases, the downstream operators may have no enough resources to # launch tasks. Then we should temporarily unblock the streaming output # backpressure by allowing reading at least 1 block. So the current operator # can finish at least one task and yield resources to the downstream operators. for downstream_op in self._get_downstream_eligible_ops(op): if not self.can_submit_new_task(downstream_op): # Case 1: the downstream operator hasn't reserved the minimum resources # to run at least one task. return True # Case 2: the downstream operator has reserved the minimum resources, but # the resources are preempted by non-Data tasks or actors. # We don't have a good way to detect this case, so we'll unblock # backpressure when the downstream operator has been idle for a while. if self._idle_detector.detect_idle(downstream_op): return True return False
OpResourceAllocator
python
pyca__cryptography
src/cryptography/hazmat/primitives/hashes.py
{ "start": 2863, "end": 2975 }
class ____(HashAlgorithm): # noqa: N801 name = "sha3-224" digest_size = 28 block_size = None
SHA3_224
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/base.py
{ "start": 32169, "end": 32653 }
class ____(HasCopyInternals): __slots__ = () _annotations: _ImmutableExecuteOptions = util.EMPTY_DICT __visit_name__: str = "executable_option" _is_has_cache_key: bool = False _is_core: bool = True def _clone(self, **kw): """Create a shallow copy of this ExecutableOption.""" c = self.__class__.__new__(self.__class__) c.__dict__ = dict(self.__dict__) # type: ignore return c _L = TypeVar("_L", bound=str)
ExecutableOption
python
PyCQA__pylint
tests/functional/u/unnecessary/unnecessary_dunder_call_async_py39.py
{ "start": 87, "end": 286 }
class ____: """A class implementing __aiter__ and __anext__.""" def __aiter__(self): ... async def __anext__(self): ... MyClass().__aiter__() MyClass().__anext__()
MyClass
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/sqltypes.py
{ "start": 5482, "end": 8418 }
class ____(Concatenable, TypeEngine[str]): """The base for all string and character types. In SQL, corresponds to VARCHAR. The `length` field is usually required when the `String` type is used within a CREATE TABLE statement, as VARCHAR requires a length on most databases. """ __visit_name__ = "string" operator_classes = OperatorClass.STRING def __init__( self, length: Optional[int] = None, collation: Optional[str] = None, ): """ Create a string-holding type. :param length: optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no ``CREATE TABLE`` will be issued. Certain databases may require a ``length`` for use in DDL, and will raise an exception when the ``CREATE TABLE`` DDL is issued if a ``VARCHAR`` with no length is included. Whether the value is interpreted as bytes or characters is database specific. :param collation: Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and PostgreSQL. E.g.: .. sourcecode:: pycon+sql >>> from sqlalchemy import cast, select, String >>> print(select(cast("some string", String(collation="utf8")))) {printsql}SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1 .. note:: In most cases, the :class:`.Unicode` or :class:`.UnicodeText` datatypes should be used for a :class:`_schema.Column` that expects to store non-ascii data. These datatypes will ensure that the correct types are used on the database. """ self.length = length self.collation = collation def _with_collation(self, collation): new_type = self.copy() new_type.collation = collation return new_type def _resolve_for_literal(self, value): # I was SO PROUD of my regex trick, but we dont need it. # re.search(r"[^\u0000-\u007F]", value) if value.isascii(): return _STRING else: return _UNICODE def literal_processor(self, dialect): def process(value): value = value.replace("'", "''") if dialect.identifier_preparer._double_percents: value = value.replace("%", "%%") return "'%s'" % value return process def bind_processor( self, dialect: Dialect ) -> Optional[_BindProcessorType[str]]: return None def result_processor( self, dialect: Dialect, coltype: object ) -> Optional[_ResultProcessorType[str]]: return None @property def python_type(self): return str def get_dbapi_type(self, dbapi): return dbapi.STRING
String
python
pallets__jinja
tests/test_bytecode_cache.py
{ "start": 1068, "end": 1984 }
class ____: def test_dump_load(self): memcached = MockMemcached() m = MemcachedBytecodeCache(memcached) b = Bucket(None, "key", "") b.code = "code" m.dump_bytecode(b) assert memcached.key == "jinja2/bytecode/key" b = Bucket(None, "key", "") m.load_bytecode(b) assert b.code == "code" def test_exception(self): memcached = MockMemcached() memcached.get = memcached.get_side_effect memcached.set = memcached.set_side_effect m = MemcachedBytecodeCache(memcached) b = Bucket(None, "key", "") b.code = "code" m.dump_bytecode(b) m.load_bytecode(b) m.ignore_memcache_errors = False with pytest.raises(MockMemcached.Error): m.dump_bytecode(b) with pytest.raises(MockMemcached.Error): m.load_bytecode(b)
TestMemcachedBytecodeCache
python
pennersr__django-allauth
allauth/socialaccount/providers/trello/views.py
{ "start": 218, "end": 1191 }
class ____(OAuthAdapter): provider_id = "trello" request_token_url = "https://trello.com/1/OAuthGetRequestToken" # nosec authorize_url = "https://trello.com/1/OAuthAuthorizeToken" access_token_url = "https://trello.com/1/OAuthGetAccessToken" # nosec def complete_login(self, request, app, token, response): # we need to get the member id and the other information info_url = "{base}?{query}".format( base="https://api.trello.com/1/members/me", query=urlencode({"key": app.key, "token": response.get("oauth_token")}), ) resp = get_adapter().get_requests_session().get(info_url) resp.raise_for_status() extra_data = resp.json() result = self.get_provider().sociallogin_from_response(request, extra_data) return result oauth_login = OAuthLoginView.adapter_view(TrelloOAuthAdapter) oauth_callback = OAuthCallbackView.adapter_view(TrelloOAuthAdapter)
TrelloOAuthAdapter
python
ansible__ansible
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
{ "start": 1129, "end": 1263 }
class ____(Completion): """Argument completion error.""" message: t.Optional[str] = None @dataclasses.dataclass
CompletionError
python
doocs__leetcode
solution/3100-3199/3101.Count Alternating Subarrays/Solution.py
{ "start": 0, "end": 216 }
class ____: def countAlternatingSubarrays(self, nums: List[int]) -> int: ans = s = 1 for a, b in pairwise(nums): s = s + 1 if a != b else 1 ans += s return ans
Solution
python
bokeh__bokeh
tests/unit/bokeh/models/test_plots.py
{ "start": 2713, "end": 5036 }
class ____: def setup_method(self): self._plot = figure(tools='pan') self._plot.scatter([1,2,3], [3,2,1], name='foo') @patch('bokeh.models.plots.find') def test_string_arg(self, mock_find: MagicMock) -> None: self._plot.select('foo') assert mock_find.called assert mock_find.call_args[0][1] == dict(name='foo') @patch('bokeh.models.plots.find') def test_type_arg(self, mock_find: MagicMock) -> None: self._plot.select(PanTool) assert mock_find.called assert mock_find.call_args[0][1] == dict(type=PanTool) @patch('bokeh.models.plots.find') def test_kwargs(self, mock_find: MagicMock) -> None: kw = dict(name='foo', type=GlyphRenderer) self._plot.select(**kw) assert mock_find.called assert mock_find.call_args[0][1] == kw @patch('bokeh.models.plots.find') def test_single_selector_kwarg(self, mock_find: MagicMock) -> None: kw = dict(name='foo', type=GlyphRenderer) self._plot.select(selector=kw) assert mock_find.called assert mock_find.call_args[0][1] == kw def test_selector_kwarg_and_extra_kwargs(self) -> None: with pytest.raises(TypeError) as exc: self._plot.select(selector=dict(foo='foo'), bar='bar') assert "when passing 'selector' keyword arg, not other keyword args may be present" == str(exc.value) def test_bad_arg_type(self) -> None: with pytest.raises(TypeError) as exc: self._plot.select(10) assert "selector must be a dictionary, string or plot object." == str(exc.value) def test_too_many_args(self) -> None: with pytest.raises(TypeError) as exc: self._plot.select('foo', 'bar') assert 'select accepts at most ONE positional argument.' == str(exc.value) def test_no_input(self) -> None: with pytest.raises(TypeError) as exc: self._plot.select() assert 'select requires EITHER a positional argument, OR keyword arguments.' == str(exc.value) def test_arg_and_kwarg(self) -> None: with pytest.raises(TypeError) as exc: self._plot.select('foo', type=PanTool) assert 'select accepts EITHER a positional argument, OR keyword arguments (not both).' == str(exc.value)
TestPlotSelect
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 353515, "end": 354989 }
class ____(sgqlc.types.Input): """Autogenerated input type of UpdateIssue""" __schema__ = github_schema __field_names__ = ("id", "title", "body", "assignee_ids", "milestone_id", "label_ids", "state", "project_ids", "client_mutation_id") id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id") """The ID of the Issue to modify.""" title = sgqlc.types.Field(String, graphql_name="title") """The title for the issue.""" body = sgqlc.types.Field(String, graphql_name="body") """The body for the issue description.""" assignee_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="assigneeIds") """An array of Node IDs of users for this issue.""" milestone_id = sgqlc.types.Field(ID, graphql_name="milestoneId") """The Node ID of the milestone for this issue.""" label_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="labelIds") """An array of Node IDs of labels for this issue.""" state = sgqlc.types.Field(IssueState, graphql_name="state") """The desired issue state.""" project_ids = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="projectIds") """An array of Node IDs for projects associated with this issue.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
UpdateIssueInput
python
google__jax
jax/experimental/mosaic/gpu/fragmented_array.py
{ "start": 10593, "end": 10662 }
class ____: times: int @dataclasses.dataclass(frozen=True)
Replicated
python
getsentry__sentry
src/sentry/shared_integrations/exceptions/__init__.py
{ "start": 3667, "end": 4223 }
class ____(ApiError): code = 504 @classmethod def from_exception(cls, exception: RequestException) -> ApiTimeoutError: maybe_request = getattr(exception, "request", None) if maybe_request is not None: return cls.from_request(maybe_request) return cls("Timed out reaching host") @classmethod def from_request(cls, request: _RequestHasUrl) -> ApiTimeoutError: host = urlparse(request.url).netloc return cls(f"Timed out attempting to reach host: {host}", url=request.url)
ApiTimeoutError
python
google__jax
jax/_src/core.py
{ "start": 119644, "end": 132739 }
class ____: aval : AbstractValue mutable_qdd : MutableQuasiDynamicData _ref_allocating_primitives = {ref_p} def _check_jaxpr( ctx_factory: Callable[[], tuple[JaxprPpContext, JaxprPpSettings]], jaxpr: Jaxpr ) -> None: env: dict[Var, Atom | MutableTypecheckVal] = {} def read(x: Atom) -> Atom | MutableTypecheckVal: # Check the type annotation is itself well-typed. check_type(ctx_factory, env, x.aval) if isinstance(x, Var): # Check the variable is in-scope and consistently typed. if x not in env: ctx, _ = ctx_factory() raise JaxprTypeError(f"Variable '{pp_var(x, ctx)}' not defined") return env[x] elif isinstance(x, Literal): # Check that the literal matches its type annotation. if not typecheck(x.aval, x.val): ctx, _ = ctx_factory() raise JaxprTypeError( f"Literal value {x.val} does not match its type annotation " f"{pp_aval(x.aval, ctx)}") return x else: assert False, "syntactically invalid jaxpr" def write(v: Var, a: AvalQDD) -> None: aval, qdd = a.aval, a.qdd assert isinstance(v, Var), "syntactically invalid jaxpr" # Check the type annotation of the binder is itself well-typed. check_type(ctx_factory, env, v.aval) # Check that the variable is not already bound. if v in env: ctx, _ = ctx_factory() raise JaxprTypeError(f"Variable '{pp_var(v, ctx)}' already bound") # Check that the computed type is consistent with the binder annotation. if not typematch(v.aval, aval): ctx, _ = ctx_factory() raise JaxprTypeError( f"Value for variable '{pp_var(v, ctx)}' inconsistently typed " f"as {pp_aval(aval, ctx)} for let-binder of type {pp_aval(v.aval, ctx)}") # If the variable is not a DropVar, add it to the environment. if not isinstance(v, DropVar): if qdd is None: env[v] = v else: env[v] = MutableTypecheckVal(aval, MutableQuasiDynamicData(qdd)) # # Don't return refs if config.mutable_array_checks.value: from jax._src.state.types import AbstractRef # pytype: disable=import-error for v in jaxpr.outvars: if isinstance(v.aval, AbstractRef): raise JaxprTypeError("returned a ref!") # Check type annotations on lambda binders. for v in it.chain(jaxpr.constvars, jaxpr.invars): check_type(ctx_factory, env, v.aval) write(v, AvalQDD(v.aval, v.initial_qdd)) # Check each eqn. sentinel = object() in_idx = {v: i for i, v in enumerate(it.chain(jaxpr.constvars, jaxpr.invars))} mut_arrays = set() for eqn_idx, eqn in enumerate(jaxpr.eqns): prim = eqn.primitive try: in_atoms = map(read, eqn.invars) in_avals = [AvalMutableQDD(x.aval, x.mutable_qdd) if isinstance(x, MutableTypecheckVal) else x.aval for x in in_atoms] # use in_atoms for dyn shapes # Compute the type of the primitive application. with eqn.ctx.manager: if prim in custom_typechecks: out_type, eqn_effects = custom_typechecks[prim]( ctx_factory, *in_atoms, **eqn.params) elif prim.call_primitive: out_type, eqn_effects = _check_call(ctx_factory, prim, in_atoms, eqn.params) elif prim.map_primitive: out_type, eqn_effects = _check_map(ctx_factory, prim, in_avals, eqn.params) else: out_type, eqn_effects = check_eqn(prim, in_avals, eqn.params) # Check the computed effect type matches the eqn's annotation, and is # included in the jaxpr's annotation. if prim.ref_primitive: if prim in _ref_allocating_primitives: outvar, = eqn.outvars in_idx[outvar] = None # type: ignore mut_arrays.add(outvar) if eqn.effects != eqn_effects: raise JaxprTypeError("Inferred effects do not match equation effects. " f"Equation effects: {eqn.effects}. 
" f"Inferred effects: {eqn_effects}") for eff in eqn.effects: if isinstance(eff, effects.JaxprInputEffect): eqn_invar = eqn.invars[eff.input_index] if type(eqn_invar) is Literal or eqn_invar in mut_arrays: continue if (jaxpr_index := in_idx.get(eqn_invar, sentinel)) is sentinel: raise JaxprTypeError( "Invalid `JaxprInputEffect`: must correspond to a jaxpr invar") jaxpr_effect = eff.replace(input_index=jaxpr_index) if jaxpr_effect not in jaxpr.effects: raise JaxprTypeError( "Invalid `JaxprInputEffect`: must be present in jaxpr. " f"{jaxpr_effect} is not in {jaxpr.effects}.") elif isinstance(eff, NamedAxisEffect): # It is valid for a primitive to discharge the named axis effect. continue elif eff not in jaxpr.effects: raise JaxprTypeError("Equation effect not present in jaxpr effects. " f"Equation effect: {eff}. " f"Jaxpr effects: {jaxpr.effects}") # Check out_type matches the let-binders' annotation (after substitution). out_type = substitute_vars_in_output_ty(out_type, eqn.invars, eqn.outvars) out_type = [t if isinstance(t, AvalQDD) else AvalQDD(t, None) for t in out_type] foreach(write, eqn.outvars, out_type) except JaxprTypeError as e: ctx, settings = ctx_factory() msg, = e.args src = source_info_util.summarize(eqn.source_info) msg = "\n\n".join([msg, "in equation:", str(pp.nest(2, pp_eqn(eqn, ctx, settings))), f"from source: {src}"]) raise JaxprTypeError(msg, eqn_idx) from None # Check there are no output refs # TODO(mattjj): improve this error message if config.mutable_array_checks.value: from jax._src.state.types import AbstractRef # pytype: disable=import-error for v in jaxpr.outvars: if isinstance(v.aval, AbstractRef): raise TypeError("returned ref") # TODO(mattjj): include output type annotation on jaxpr and check it here foreach(read, jaxpr.outvars) def check_type( ctx_factory: Callable[[], tuple[JaxprPpContext, JaxprPpSettings]], env: dict[Var, Atom | MutableTypecheckVal], ty: AbstractValue, ) -> None: if isinstance(ty, DShapedArray): # Check all elements in the shape tuple are well-typed. 
for d in ty.shape: if (isinstance(d, int) or isinstance(d, DArray) and not d.shape and type(d.dtype) == bint): continue elif isinstance(d, Var): if d not in env: ctx, _ = ctx_factory() raise JaxprTypeError(f"unbound axis size: '{pp_var(d, ctx)}'") if not isinstance(d.aval, (ShapedArray, DShapedArray)): raise JaxprTypeError(f"axis size with unexpected type annotation: " f"{d.aval} of type {type(d.aval)}") if isinstance(d.aval, ShapedArray): shape, dtype = d.aval.shape, d.aval.dtype if shape: raise JaxprTypeError(f"axis size nonscalar: {d.aval}") if not dtypes.issubdtype(dtype, np.integer): raise JaxprTypeError(f"axis size with non-integer dtype: {d.aval}") else: assert isinstance(d.aval, DShapedArray) shape, dtype = d.aval.shape, d.aval.dtype if shape: raise JaxprTypeError(f"axis size nonscalar: {d.aval}") if type(dtype) is not bint: raise JaxprTypeError( f"DArray axis size with non-bint dtype: {d.aval}") else: raise JaxprTypeError(f"unexpected type in shape: {type(d)}") else: return # Except in above case(s), all syntactic forms are valid def substitute_vars_in_output_ty( out_type: Sequence[AbstractValue], # shapes may contain InDBIdx / OutDBIdx in_atoms: Sequence[Atom], out_binders: Sequence[Var], ) -> list[AbstractValue]: # shapes may contain Vars in_atoms = [x.val if type(x) is Literal else x for x in in_atoms] result = [] for aval in out_type: if type(aval) is DShapedArray: shape = [in_atoms[d.val] if type(d) is InDBIdx else out_binders[d.val] if type(d) is OutDBIdx else d for d in aval.shape] aval = aval.update(shape=tuple(shape)) result.append(aval) return result def check_eqn(prim, in_avals, params): for jaxpr in jaxprs_in_params(params): check_jaxpr(jaxpr) out_avals, effects = prim.abstract_eval(*in_avals, **params) if not prim.multiple_results: out_avals = [out_avals] return out_avals, effects def _check_call(ctx_factory, prim, in_atoms, params): if "call_jaxpr" not in params: raise JaxprTypeError( f"Call primitive {prim} missing 'call_jaxpr' parameter") if isinstance(prim, ClosedCallPrimitive): call_jaxpr = params["call_jaxpr"].jaxpr else: call_jaxpr = params["call_jaxpr"] if len(in_atoms) != len(call_jaxpr.invars): raise JaxprTypeError(f"Call primitive {prim} with {len(in_atoms)} " f"operands cannot call jaxpr with " f"{len(call_jaxpr.invars)} inputs") # Check `call_jaxpr` can be applied to in_atoms. 
env: dict[Var, Atom | MutableTypecheckVal] = {} def substitute(aval: AbstractValue): if isinstance(aval, DShapedArray): aval = aval.update(shape=tuple(env.get(d, d) for d in aval.shape)) # type: ignore return aval for v, x in zip(call_jaxpr.invars, in_atoms): if not typecompat(substitute(v.aval), x.aval): # TODO(mattjj): vars in error message are confusing b/c of Var.__repr__ raise JaxprTypeError(f"Call primitive {prim} passes operand {x} of type " f"{x.aval} to jaxpr expecting type " f"{substitute(v.aval)}") env[v] = x.val if type(x) is Literal else x check_jaxpr(call_jaxpr) invars, outvars = call_jaxpr.invars, call_jaxpr.outvars in_map : dict[Var, InDBIdx] = {v: InDBIdx(i) for i, v in enumerate( invars)} out_map: dict[Var, OutDBIdx] = {x: OutDBIdx(i) for i, x in enumerate(outvars) if type(x) is Var} out_avals = [x.aval for x in call_jaxpr.outvars] out_type = [a.update(shape=tuple(in_map.get(d, out_map.get(d)) if type(d) is Var else d for d in a.shape)) if type(a) is DShapedArray else a for a in out_avals] # jaxpr input effects are indexed to include jaxpr.constvars, but the eqn # should have effects indexed only on its explicit arguments effs = {e.replace(input_index=e.input_index - len(call_jaxpr.constvars)) if isinstance(e, effects.JaxprInputEffect) else e for e in call_jaxpr.effects} return out_type, effs def _check_map(ctx_factory, prim, in_avals, params): if "call_jaxpr" not in params: raise JaxprTypeError(f"Map primitive {prim} missing 'call_jaxpr' parameter") call_jaxpr = params["call_jaxpr"] ordered_effects_ = effects.ordered_effects.filter_in(call_jaxpr.effects) if ordered_effects_: raise JaxprTypeError( f"Map primitive {prim} mapping ordered effects: {ordered_effects_}") if "axis_size" not in params: raise JaxprTypeError(f"Map primitive {prim} missing 'axis_size' parameter") axis_size = params["axis_size"] if "axis_name" not in params: raise JaxprTypeError(f"Map primitive {prim} missing 'axis_name' parameter") axis_name = params["axis_name"] if "in_axes" not in params: raise JaxprTypeError(f"Map primitive {prim} missing 'in_axes' parameter") in_axes = params["in_axes"] if "out_axes" not in params: raise JaxprTypeError(f"Map primitive {prim} missing 'out_axes' parameter") out_axes = params["out_axes"] binder_avals = [unmapped_aval(axis_size, in_axis, v.aval) if in_axis is not None else v.aval for v, in_axis in zip(call_jaxpr.invars, in_axes)] for binder_aval, in_aval in zip(binder_avals, in_avals): if not typecompat(binder_aval, in_aval): raise JaxprTypeError(f"Call primitive {prim} passes operand {in_aval} " f"to jaxpr expecting {binder_aval}") with extend_axis_env_nd([(params['axis_name'], axis_size)]): _check_jaxpr(ctx_factory, call_jaxpr) mapped_out_avals = [v.aval for v in call_jaxpr.outvars] out_avals = [unmapped_aval(axis_size, out_axis, aval) if out_axis is not None else aval for aval, out_axis in zip(mapped_out_avals, out_axes)] return out_avals, filter_named_axis_effects(call_jaxpr.effects, {axis_name}) def eqn_effects(jaxpr): # jaxpr input effects are indexed to include jaxpr.constvars, but the eqn # should have effects indexed only on its explicit arguments effs = jaxpr.effects return {e.replace(input_index=e.input_index - len(jaxpr.constvars)) if isinstance(e, effects.JaxprInputEffect) else e for e in effs} # ------------------- ShapeDtypeStruct ------------------- @set_module("jax")
MutableTypecheckVal
python
pytorch__pytorch
torch/backends/xeon/run_cpu.py
{ "start": 6334, "end": 11232 }
class ____: """Get CPU information, such as cores list and NUMA information.""" def __init__(self, test_input=""): self.cpuinfo = [] if platform.system() in ["Windows", "Darwin"]: raise RuntimeError(f"{platform.system()} is not supported!!!") elif platform.system() == "Linux": # Sample output of: `lscpu --parse=CPU,Core,Socket,Node` # # # The following is the parsable format, which can be fed to other # # programs. Each different item in every column has an unique ID # # starting from zero. # # CPU,Core,Socket,Node # 0,0,0,0 # 1,1,0,0 # ... if test_input == "": lscpu_cmd = ["lscpu", "--parse=CPU,Core,Socket,Node"] lscpu_info = subprocess.check_output( lscpu_cmd, universal_newlines=True ).split("\n") else: lscpu_info = test_input.split("\n") # Get information about cpu, core, socket and node for line in lscpu_info: pattern = r"^([\d]+,[\d]+,[\d]+,[\d]?)" regex_out = re.search(pattern, line) if regex_out: self.cpuinfo.append(regex_out.group(1).strip().split(",")) # physical cores := core column in lscpu output # logical cores := cPU column in lscpu output self.node_nums = int(max(line[3] for line in self.cpuinfo)) + 1 self.node_physical_cores: list[list[int]] = [] # node_id is index self.node_logical_cores: list[list[int]] = [] # node_id is index self.physical_core_node_map = {} # physical core to numa node id self.logical_core_node_map = {} # logical core to numa node id for node_id in range(self.node_nums): cur_node_physical_core = [] cur_node_logical_core = [] for cpuinfo in self.cpuinfo: nid = cpuinfo[3] if cpuinfo[3] != "" else "0" if node_id == int(nid): if int(cpuinfo[1]) not in cur_node_physical_core: cur_node_physical_core.append(int(cpuinfo[1])) self.physical_core_node_map[int(cpuinfo[1])] = int(node_id) cur_node_logical_core.append(int(cpuinfo[0])) self.logical_core_node_map[int(cpuinfo[0])] = int(node_id) self.node_physical_cores.append(cur_node_physical_core) self.node_logical_cores.append(cur_node_logical_core) def _physical_core_nums(self): return len(self.node_physical_cores) * len(self.node_physical_cores[0]) def _logical_core_nums(self): return len(self.node_logical_cores) * len(self.node_logical_cores[0]) def get_node_physical_cores(self, node_id): if node_id < 0 or node_id > self.node_nums - 1: raise ValueError( f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}" ) return self.node_physical_cores[node_id] def get_node_logical_cores(self, node_id): if node_id < 0 or node_id > self.node_nums - 1: raise ValueError( f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}" ) return self.node_logical_cores[node_id] def get_all_physical_cores(self): all_cores = [] for cores in self.node_physical_cores: all_cores.extend(cores) return all_cores def get_all_logical_cores(self): all_cores = [] for cores in self.node_logical_cores: all_cores.extend(cores) return all_cores def numa_aware_check(self, core_list): """ Check whether all cores in core_list are in the same NUMA node. Cross NUMA will reduce performance. We strongly advice to not use cores on different nodes. """ cores_numa_map = self.logical_core_node_map numa_ids = [] for core in core_list: numa_id = cores_numa_map[core] if numa_id not in numa_ids: numa_ids.append(numa_id) if len(numa_ids) > 1: logger.warning( "Numa Aware: cores:%s on different NUMA nodes:%s. To avoid \ this behavior, please use --ncores-per-instance knob to make sure number of cores is divisible by --ncores-per-\ instance. 
Alternatively, please use --skip-cross-node-cores knob.", str(core_list), str(numa_ids), ) if len(numa_ids) == 0: raise RuntimeError( "invalid number of NUMA nodes; please make sure numa_ids >= 1" ) return numa_ids
_CPUinfo
python
PyCQA__pylint
doc/data/messages/i/invalid-name/bad.py
{ "start": 0, "end": 207 }
class ____:  # [invalid-name]
    def Meow(self, NUMBER_OF_MEOW):  # [invalid-name, invalid-name]
        print("Meow" * NUMBER_OF_MEOW)
        return NUMBER_OF_MEOW


Cat = cat().Meow(42)  # [invalid-name]
cat
python
astropy__astropy
astropy/utils/shapes.py
{ "start": 7293, "end": 14452 }
class ____(NDArrayShapeMethods, metaclass=abc.ABCMeta):
    """Mixin class to provide shape-changing methods.

    The class proper is assumed to have some underlying data, which are arrays
    or array-like structures. It must define a ``shape`` property, which gives
    the shape of those data, as well as an ``_apply`` method that creates a new
    instance in which a `~numpy.ndarray` method has been applied to those.

    Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
    define a setter for the ``shape`` property, which, like the
    `~numpy.ndarray.shape` property allows in-place reshaping the internal data
    (and, unlike the ``reshape`` method raises an exception if this is not
    possible).

    This class also defines default implementations for ``ndim`` and ``size``
    properties, calculating those from the ``shape``. These can be overridden
    by subclasses if there are faster ways to obtain those numbers.
    """

    # Note to developers: if new methods are added here, be sure to check that
    # they work properly with the classes that use this, such as Time and
    # BaseRepresentation, i.e., look at their ``_apply`` methods and add
    # relevant tests. This is particularly important for methods that imply
    # copies rather than views of data (see the special-case treatment of
    # 'flatten' in Time).

    @property
    @abc.abstractmethod
    def shape(self) -> tuple[int, ...]:
        """The shape of the underlying data."""

    @abc.abstractmethod
    def _apply(method, *args, **kwargs):
        """Create a new instance, with ``method`` applied to underlying data.

        The method is any of the shape-changing methods for `~numpy.ndarray`
        (``reshape``, ``swapaxes``, etc.), as well as those picking particular
        elements (``__getitem__``, ``take``, etc.). It will be applied to the
        underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`),
        with the results used to create a new instance.

        Parameters
        ----------
        method : str
            Method to be applied to the instance's internal data arrays.
        args : tuple
            Any positional arguments for ``method``.
        kwargs : dict
            Any keyword arguments for ``method``.
        """

    @property
    def ndim(self) -> int:
        """The number of dimensions of the instance and underlying arrays."""
        return len(self.shape)

    @property
    def size(self) -> int:
        """The size of the object, as calculated from its shape."""
        return prod(self.shape)

    @property
    def isscalar(self) -> bool:
        return self.shape == ()

    def __len__(self) -> int:
        if self.isscalar:
            raise TypeError(f"Scalar {self.__class__.__name__!r} object has no len()")
        return self.shape[0]

    def __bool__(self) -> bool:
        """Any instance should evaluate to True, except when it is empty."""
        return self.size > 0

    def __getitem__(self, item):
        try:
            return self._apply("__getitem__", item)
        except IndexError:
            if self.isscalar:
                raise TypeError(
                    f"scalar {self.__class__.__name__!r} object is not subscriptable."
                )
            else:
                raise

    def __iter__(self):
        if self.isscalar:
            raise TypeError(
                f"scalar {self.__class__.__name__!r} object is not iterable."
            )

        # We cannot just write a generator here, since then the above error
        # would only be raised once we try to use the iterator, rather than
        # upon its definition using iter(self).
        def self_iter():
            for idx in range(len(self)):
                yield self[idx]

        return self_iter()

    # Functions that change shape or essentially do indexing.
    _APPLICABLE_FUNCTIONS = {
        np.moveaxis,
        np.rollaxis,
        np.atleast_1d,
        np.atleast_2d,
        np.atleast_3d,
        np.expand_dims,
        np.broadcast_to,
        np.flip,
        np.fliplr,
        np.flipud,
        np.rot90,
        np.roll,
        np.delete,
    }

    # TODO: use astropy.units.quantity_helpers.function_helpers.FunctionAssigner?
    # Maybe better after moving that to astropy.utils, since Masked uses it too.
    _CUSTOM_FUNCTIONS = {
        np.concatenate: concatenate,
        np.stack: stack,
    }

    # Functions that themselves defer to a method. Those are all
    # defined in np._core.fromnumeric, but exclude alen as well as
    # sort and partition, which make copies before calling the method.
    _METHOD_FUNCTIONS = {
        getattr(np, name): {
            "amax": "max",
            "amin": "min",
            "around": "round",
            "round_": "round",
            "alltrue": "all",
            "sometrue": "any",
        }.get(name, name)
        for name in np_core.fromnumeric.__all__
        if name not in ["alen", "sort", "partition"]
    }
    # Add np.copy, which we may as well let defer to our method.
    _METHOD_FUNCTIONS[np.copy] = "copy"

    # Could be made to work with a bit of effort:
    # np.where, np.compress, np.extract,
    # np.diag_indices_from, np.triu_indices_from, np.tril_indices_from
    # np.tile, np.repeat (need .repeat method)
    # TODO: create a proper implementation.
    # Furthermore, some arithmetic functions such as np.mean, np.median,
    # could work for Time, and many more for TimeDelta, so those should
    # override __array_function__.
    def __array_function__(self, function, types, args, kwargs):
        """Wrap numpy functions that make sense."""
        if function in self._APPLICABLE_FUNCTIONS:
            if function is np.broadcast_to:
                # Ensure that any ndarray subclasses used are
                # properly propagated.
                kwargs.setdefault("subok", True)
            elif (
                function in {np.atleast_1d, np.atleast_2d, np.atleast_3d}
                and len(args) > 1
            ):
                seq_cls = list if NUMPY_LT_2_0 else tuple
                return seq_cls(function(arg, **kwargs) for arg in args)

            if self is not args[0]:
                return NotImplemented

            return self._apply(function, *args[1:], **kwargs)

        elif function in self._CUSTOM_FUNCTIONS:
            return self._CUSTOM_FUNCTIONS[function](*args, **kwargs)

        # For functions that defer to methods, use the corresponding
        # method/attribute if we have it. Otherwise, fall through.
        if self is args[0] and function in self._METHOD_FUNCTIONS:
            method = getattr(self, self._METHOD_FUNCTIONS[function], None)
            if method is not None:
                if callable(method):
                    return method(*args[1:], **kwargs)
                else:
                    # For np.shape, etc., just return the attribute.
                    return method

        # Fall-back, just pass the arguments on since perhaps the function
        # works already (see above).
        return function.__wrapped__(*args, **kwargs)
ShapedLikeNDArray
python
tensorflow__tensorflow
tensorflow/python/feature_column/sequence_feature_column_test.py
{ "start": 5822, "end": 8425 }
class ____( test.TestCase, parameterized.TestCase): @parameterized.named_parameters( {'testcase_name': '2D', 'inputs_args': { 'indices': ((0, 0), (1, 0), (1, 1)), 'values': (1, 2, 0), 'dense_shape': (2, 2)}, 'expected_args': { 'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)), 'values': np.array((1, 2, 0), dtype=np.int64), 'dense_shape': (2, 2, 1)}}, {'testcase_name': '3D', 'inputs_args': { 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)), 'values': (6, 7, 8), 'dense_shape': (2, 2, 2)}, 'expected_args': { 'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)), 'values': np.array((6, 7, 8), dtype=np.int64), 'dense_shape': (2, 2, 2)}} ) def test_get_sparse_tensors(self, inputs_args, expected_args): inputs = sparse_tensor.SparseTensorValue(**inputs_args) expected = sparse_tensor.SparseTensorValue(**expected_args) column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9) id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs}) self.assertIsNone(id_weight_pair.weight_tensor) _assert_sparse_tensor_value( self, expected, self.evaluate(id_weight_pair.id_tensor)) def test_serialization(self): """Tests that column can be serialized.""" parent = sfc.sequence_categorical_column_with_identity( 'animal', num_buckets=4) animal = fc.indicator_column(parent) config = animal.get_config() self.assertEqual( { 'categorical_column': { 'class_name': 'SequenceCategoricalColumn', 'config': { 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'default_value': None, 'key': 'animal', 'number_buckets': 4 } } } } }, config) new_animal = fc.IndicatorColumn.from_config(config) self.assertEqual(animal, new_animal) self.assertIsNot(parent, new_animal.categorical_column) new_animal = fc.IndicatorColumn.from_config( config, columns_by_name={ serialization._column_name_with_class_name(parent): parent }) self.assertEqual(animal, new_animal) self.assertIs(parent, new_animal.categorical_column) @test_util.run_all_in_graph_and_eager_modes
SequenceCategoricalColumnWithIdentityTest
python
apache__airflow
airflow-core/src/airflow/serialization/serialized_objects.py
{ "start": 21856, "end": 45622 }
class ____: """BaseSerialization provides utils for serialization.""" # JSON primitive types. _primitive_types = (int, bool, float, str) # Time types. # datetime.date and datetime.time are converted to strings. _datetime_types = (datetime.datetime,) # Object types that are always excluded in serialization. _excluded_types = (logging.Logger, Connection, type, property) _json_schema: ClassVar[Validator | None] = None # Should the extra operator link be loaded via plugins when # de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links # are not loaded to not run User code in Scheduler. _load_operator_extra_links = True _CONSTRUCTOR_PARAMS: dict[str, Parameter] = {} SERIALIZER_VERSION = 3 @classmethod def to_json(cls, var: Any) -> str: """Stringify DAGs and operators contained by var and returns a JSON string of var.""" return json.dumps(cls.to_dict(var), ensure_ascii=True) @classmethod def to_dict(cls, var: Any) -> dict: """Stringify DAGs and operators contained by var and returns a dict of var.""" # Don't call on this class directly - only SerializedDAG or # SerializedBaseOperator should be used as the "entrypoint" raise NotImplementedError() @classmethod def from_json(cls, serialized_obj: str) -> BaseSerialization | dict | list | set | tuple: """Deserialize json_str and reconstructs all DAGs and operators it contains.""" return cls.from_dict(json.loads(serialized_obj)) @classmethod def from_dict(cls, serialized_obj: dict[Encoding, Any]) -> BaseSerialization | dict | list | set | tuple: """Deserialize a dict of type decorators and reconstructs all DAGs and operators it contains.""" return cls.deserialize(serialized_obj) @classmethod def validate_schema(cls, serialized_obj: str | dict) -> None: """Validate serialized_obj satisfies JSON schema.""" if cls._json_schema is None: raise AirflowException(f"JSON schema of {cls.__name__:s} is not set.") if isinstance(serialized_obj, dict): cls._json_schema.validate(serialized_obj) elif isinstance(serialized_obj, str): cls._json_schema.validate(json.loads(serialized_obj)) else: raise TypeError("Invalid type: Only dict and str are supported.") @staticmethod def _encode(x: Any, type_: Any) -> dict[Encoding, Any]: """Encode data by a JSON dict.""" return {Encoding.VAR: x, Encoding.TYPE: type_} @classmethod def _is_primitive(cls, var: Any) -> bool: """Primitive types.""" return var is None or isinstance(var, cls._primitive_types) @classmethod def _is_excluded(cls, var: Any, attrname: str, instance: Any) -> bool: """Check if type is excluded from serialization.""" if var is None: if not cls._is_constructor_param(attrname, instance): # Any instance attribute, that is not a constructor argument, we exclude None as the default return True return cls._value_is_hardcoded_default(attrname, var, instance) return isinstance(var, cls._excluded_types) or cls._value_is_hardcoded_default( attrname, var, instance ) @classmethod def serialize_to_json( cls, # TODO (GH-52141): When can we remove scheduler constructs here? object_to_serialize: SdkOperator | SerializedOperator | DAG | SerializedDAG, decorated_fields: set, ) -> dict[str, Any]: """Serialize an object to JSON.""" serialized_object: dict[str, Any] = {} keys_to_serialize = object_to_serialize.get_serialized_fields() for key in keys_to_serialize: # None is ignored in serialized form and is added back in deserialization. 
value = getattr(object_to_serialize, key, None) if cls._is_excluded(value, key, object_to_serialize): continue if key == "_operator_name": # when operator_name matches task_type, we can remove # it to reduce the JSON payload task_type = getattr(object_to_serialize, "task_type", None) if value != task_type: serialized_object[key] = cls.serialize(value) elif key in decorated_fields: serialized_object[key] = cls.serialize(value) elif key == "timetable" and value is not None: serialized_object[key] = encode_timetable(value) elif key == "weight_rule" and value is not None: encoded_priority_weight_strategy = encode_priority_weight_strategy(value) # Exclude if it is just default default_pri_weight_stra = cls.get_schema_defaults("operator").get(key, None) if default_pri_weight_stra != encoded_priority_weight_strategy: serialized_object[key] = encoded_priority_weight_strategy else: value = cls.serialize(value) if isinstance(value, dict) and Encoding.TYPE in value: value = value[Encoding.VAR] serialized_object[key] = value return serialized_object @classmethod def serialize( cls, var: Any, *, strict: bool = False ) -> Any: # Unfortunately there is no support for recursive types in mypy """ Serialize an object; helper function of depth first search for serialization. The serialization protocol is: (1) keeping JSON supported types: primitives, dict, list; (2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization step decode VAR according to TYPE; (3) Operator has a special field CLASS to record the original class name for displaying in UI. :meta private: """ from airflow.sdk.definitions._internal.types import is_arg_set from airflow.sdk.exceptions import TaskDeferred if not is_arg_set(var): return cls._encode(None, type_=DAT.ARG_NOT_SET) elif cls._is_primitive(var): # enum.IntEnum is an int instance, it causes json dumps error so we use its value. if isinstance(var, enum.Enum): return var.value # These are not allowed in JSON. 
https://datatracker.ietf.org/doc/html/rfc8259#section-6 if isinstance(var, float) and (math.isnan(var) or math.isinf(var)): return str(var) return var elif isinstance(var, dict): return cls._encode( {str(k): cls.serialize(v, strict=strict) for k, v in var.items()}, type_=DAT.DICT, ) elif isinstance(var, list): return [cls.serialize(v, strict=strict) for v in var] elif var.__class__.__name__ == "V1Pod" and _has_kubernetes() and isinstance(var, k8s.V1Pod): json_pod = PodGenerator.serialize_pod(var) return cls._encode(json_pod, type_=DAT.POD) elif isinstance(var, OutletEventAccessors): return cls._encode( encode_outlet_event_accessors(var), type_=DAT.ASSET_EVENT_ACCESSORS, ) elif isinstance(var, AssetUniqueKey): return cls._encode( attrs.asdict(var), type_=DAT.ASSET_UNIQUE_KEY, ) elif isinstance(var, AssetAliasUniqueKey): return cls._encode( attrs.asdict(var), type_=DAT.ASSET_ALIAS_UNIQUE_KEY, ) elif isinstance(var, DAG): return cls._encode(SerializedDAG.serialize_dag(var), type_=DAT.DAG) elif isinstance(var, DeadlineAlert): return cls._encode(DeadlineAlert.serialize_deadline_alert(var), type_=DAT.DEADLINE_ALERT) elif isinstance(var, Resources): return var.to_dict() elif isinstance(var, MappedOperator): return cls._encode(SerializedBaseOperator.serialize_mapped_operator(var), type_=DAT.OP) elif isinstance(var, BaseOperator): var._needs_expansion = var.get_needs_expansion() return cls._encode(SerializedBaseOperator.serialize_operator(var), type_=DAT.OP) elif isinstance(var, cls._datetime_types): return cls._encode(var.timestamp(), type_=DAT.DATETIME) elif isinstance(var, datetime.timedelta): return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA) elif isinstance(var, (Timezone, FixedTimezone)): return cls._encode(encode_timezone(var), type_=DAT.TIMEZONE) elif isinstance(var, relativedelta.relativedelta): return cls._encode(encode_relativedelta(var), type_=DAT.RELATIVEDELTA) elif isinstance(var, TaskInstanceKey): return cls._encode( var._asdict(), type_=DAT.TASK_INSTANCE_KEY, ) elif isinstance(var, (AirflowException, TaskDeferred)) and hasattr(var, "serialize"): exc_cls_name, args, kwargs = var.serialize() return cls._encode( cls.serialize( {"exc_cls_name": exc_cls_name, "args": args, "kwargs": kwargs}, strict=strict, ), type_=DAT.AIRFLOW_EXC_SER, ) elif isinstance(var, (KeyError, AttributeError)): return cls._encode( cls.serialize( { "exc_cls_name": var.__class__.__name__, "args": [var.args], "kwargs": {}, }, strict=strict, ), type_=DAT.BASE_EXC_SER, ) elif isinstance(var, BaseTrigger): return cls._encode( cls.serialize( var.serialize(), strict=strict, ), type_=DAT.BASE_TRIGGER, ) elif callable(var): return str(get_python_source(var)) elif isinstance(var, set): # FIXME: casts set to list in customized serialization in future. try: return cls._encode( sorted(cls.serialize(v, strict=strict) for v in var), type_=DAT.SET, ) except TypeError: return cls._encode( [cls.serialize(v, strict=strict) for v in var], type_=DAT.SET, ) elif isinstance(var, tuple): # FIXME: casts tuple to list in customized serialization in future. 
return cls._encode( [cls.serialize(v, strict=strict) for v in var], type_=DAT.TUPLE, ) elif isinstance(var, TaskGroup): return TaskGroupSerialization.serialize_task_group(var) elif isinstance(var, Param): return cls._encode(cls._serialize_param(var), type_=DAT.PARAM) elif isinstance(var, XComArg): return cls._encode(serialize_xcom_arg(var), type_=DAT.XCOM_REF) elif isinstance(var, LazySelectSequence): return cls.serialize(list(var)) elif isinstance(var, BaseAsset): serialized_asset = encode_asset_condition(var) return cls._encode(serialized_asset, type_=serialized_asset.pop("__type")) elif isinstance(var, AssetRef): return cls._encode(attrs.asdict(var), type_=DAT.ASSET_REF) elif isinstance(var, Connection): return cls._encode(var.to_dict(validate=True), type_=DAT.CONNECTION) elif isinstance(var, TaskCallbackRequest): return cls._encode(var.to_json(), type_=DAT.TASK_CALLBACK_REQUEST) elif isinstance(var, DagCallbackRequest): return cls._encode(var.to_json(), type_=DAT.DAG_CALLBACK_REQUEST) elif var.__class__ == Context: d = {} for k, v in var.items(): obj = cls.serialize(v, strict=strict) d[str(k)] = obj return cls._encode(d, type_=DAT.TASK_CONTEXT) else: return cls.default_serialization(strict, var) @classmethod def default_serialization(cls, strict, var) -> str: log.debug("Cast type %s to str in serialization.", type(var)) if strict: raise SerializationError("Encountered unexpected type") return str(var) @classmethod def deserialize(cls, encoded_var: Any) -> Any: """ Deserialize an object; helper function of depth first search for deserialization. :meta private: """ if cls._is_primitive(encoded_var): return encoded_var elif isinstance(encoded_var, list): return [cls.deserialize(v) for v in encoded_var] if not isinstance(encoded_var, dict): raise ValueError(f"The encoded_var should be dict and is {type(encoded_var)}") var = encoded_var[Encoding.VAR] type_ = encoded_var[Encoding.TYPE] if type_ == DAT.TASK_CONTEXT: d = {} for k, v in var.items(): if k == "task": # todo: add `_encode` of Operator so we don't need this continue d[k] = cls.deserialize(v) d["task"] = d["task_instance"].task # todo: add `_encode` of Operator so we don't need this d["macros"] = macros d["var"] = { "json": VariableAccessor(deserialize_json=True), "value": VariableAccessor(deserialize_json=False), } d["conn"] = ConnectionAccessor() return Context(**d) elif type_ == DAT.DICT: return {k: cls.deserialize(v) for k, v in var.items()} elif type_ == DAT.ASSET_EVENT_ACCESSORS: return decode_outlet_event_accessors(var) elif type_ == DAT.ASSET_UNIQUE_KEY: return AssetUniqueKey(name=var["name"], uri=var["uri"]) elif type_ == DAT.ASSET_ALIAS_UNIQUE_KEY: return AssetAliasUniqueKey(name=var["name"]) elif type_ == DAT.DAG: return SerializedDAG.deserialize_dag(var) elif type_ == DAT.OP: return SerializedBaseOperator.deserialize_operator(var) elif type_ == DAT.DATETIME: return from_timestamp(var) elif type_ == DAT.POD: # Attempt to import kubernetes for deserialization. Using attempt_import=True allows # lazy loading of kubernetes libraries only when actually needed for POD deserialization. if not _has_kubernetes(attempt_import=True): raise RuntimeError( "Cannot deserialize POD objects without kubernetes libraries. " "Please install the cncf.kubernetes provider." 
) pod = PodGenerator.deserialize_model_dict(var) return pod elif type_ == DAT.TIMEDELTA: return datetime.timedelta(seconds=var) elif type_ == DAT.TIMEZONE: return decode_timezone(var) elif type_ == DAT.RELATIVEDELTA: return decode_relativedelta(var) elif type_ == DAT.AIRFLOW_EXC_SER or type_ == DAT.BASE_EXC_SER: deser = cls.deserialize(var) exc_cls_name = deser["exc_cls_name"] args = deser["args"] kwargs = deser["kwargs"] del deser if type_ == DAT.AIRFLOW_EXC_SER: exc_cls = import_string(exc_cls_name) else: exc_cls = import_string(f"builtins.{exc_cls_name}") return exc_cls(*args, **kwargs) elif type_ == DAT.BASE_TRIGGER: tr_cls_name, kwargs = cls.deserialize(var) tr_cls = import_string(tr_cls_name) return tr_cls(**kwargs) elif type_ == DAT.SET: return {cls.deserialize(v) for v in var} elif type_ == DAT.TUPLE: return tuple(cls.deserialize(v) for v in var) elif type_ == DAT.PARAM: return cls._deserialize_param(var) elif type_ == DAT.XCOM_REF: return _XComRef(var) # Delay deserializing XComArg objects until we have the entire DAG. elif type_ == DAT.ASSET: return decode_asset(var) elif type_ == DAT.ASSET_ALIAS: return AssetAlias(**var) elif type_ == DAT.ASSET_ANY: return AssetAny(*(decode_asset_condition(x) for x in var["objects"])) elif type_ == DAT.ASSET_ALL: return AssetAll(*(decode_asset_condition(x) for x in var["objects"])) elif type_ == DAT.ASSET_REF: return Asset.ref(**var) elif type_ == DAT.CONNECTION: return Connection(**var) elif type_ == DAT.TASK_CALLBACK_REQUEST: return TaskCallbackRequest.from_json(var) elif type_ == DAT.DAG_CALLBACK_REQUEST: return DagCallbackRequest.from_json(var) elif type_ == DAT.TASK_INSTANCE_KEY: return TaskInstanceKey(**var) elif type_ == DAT.ARG_NOT_SET: from airflow.serialization.definitions.notset import NOTSET return NOTSET elif type_ == DAT.DEADLINE_ALERT: return DeadlineAlert.deserialize_deadline_alert(var) else: raise TypeError(f"Invalid type {type_!s} in deserialization.") _deserialize_datetime = from_timestamp _deserialize_timezone = parse_timezone @classmethod def _deserialize_timedelta(cls, seconds: int) -> datetime.timedelta: return datetime.timedelta(seconds=seconds) @classmethod def _is_constructor_param(cls, attrname: str, instance: Any) -> bool: return attrname in cls._CONSTRUCTOR_PARAMS @classmethod def _value_is_hardcoded_default(cls, attrname: str, value: Any, instance: Any) -> bool: """ Return true if ``value`` is the hard-coded default for the given attribute. This takes in to account cases where the ``max_active_tasks`` parameter is stored in the ``_max_active_tasks`` attribute. And by using `is` here only and not `==` this copes with the case a user explicitly specifies an attribute with the same "value" as the default. (This is because ``"default" is "default"`` will be False as they are different strings with the same characters.) Also returns True if the value is an empty list or empty dict. This is done to account for the case where the default value of the field is None but has the ``field = field or {}`` set. 
""" if attrname in cls._CONSTRUCTOR_PARAMS: if cls._CONSTRUCTOR_PARAMS[attrname] is value or (value in [{}, []]): return True if cls._CONSTRUCTOR_PARAMS[attrname] is attrs.NOTHING and value is None: return True if attrs.has(type(instance)): return any(fld.default is value for fld in attrs.fields(type(instance)) if fld.name == attrname) return False @classmethod def _serialize_param(cls, param: Param): return { "__class": f"{param.__module__}.{param.__class__.__name__}", "default": cls.serialize(param.value), "description": cls.serialize(param.description), "schema": cls.serialize(param.schema), "source": cls.serialize(getattr(param, "source", None)), } @classmethod def _deserialize_param(cls, param_dict: dict) -> SerializedParam: """ Deserialize an encoded Param to a server-side SerializedParam. In 2.2.0, Param attrs were assumed to be json-serializable and were not run through this class's ``serialize`` method. So before running through ``deserialize``, we first verify that it's necessary to do. """ attrs = ("default", "description", "schema", "source") kwargs = {} def is_serialized(val): if isinstance(val, dict): return Encoding.TYPE in val if isinstance(val, list): return all(isinstance(item, dict) and Encoding.TYPE in item for item in val) return False for attr in attrs: if attr in param_dict: val = param_dict[attr] if is_serialized(val): val = cls.deserialize(val) kwargs[attr] = val return SerializedParam( default=kwargs.get("default"), description=kwargs.get("description"), source=kwargs.get("source", None), **(kwargs.get("schema") or {}), ) @classmethod def _serialize_params_dict(cls, params: ParamsDict | dict) -> list[tuple[str, dict]]: """Serialize Params dict for a DAG or task as a list of tuples to ensure ordering.""" serialized_params = [] for k, raw_v in params.items(): # Use native param object, not resolved value if possible v = params.get_param(k) if isinstance(params, ParamsDict) else raw_v try: class_identity = f"{v.__module__}.{v.__class__.__name__}" except AttributeError: class_identity = "" if class_identity == "airflow.sdk.definitions.param.Param": serialized_params.append((k, cls._serialize_param(v))) else: # Auto-box other values into Params object like it is done by DAG parsing as well serialized_params.append((k, cls._serialize_param(Param(v)))) return serialized_params @classmethod def _deserialize_params_dict(cls, encoded_params: list[tuple[str, dict]]) -> SerializedParamsDict: """Deserialize an encoded ParamsDict to a server-side SerializedParamsDict.""" if isinstance(encoded_params, collections.abc.Mapping): # in 2.9.2 or earlier params were serialized as JSON objects encoded_param_pairs: Iterable[tuple[str, dict]] = encoded_params.items() else: encoded_param_pairs = encoded_params def deserialized_param(v): if not isinstance(v, dict) or "__class" not in v: return SerializedParam(v) # Old style param serialization format. return cls._deserialize_param(v) op_params = {k: deserialized_param(v) for k, v in encoded_param_pairs} return SerializedParamsDict(op_params) @classmethod @lru_cache(maxsize=4) # Cache for "operator", "dag", and a few others def get_schema_defaults(cls, object_type: str) -> dict[str, Any]: """ Extract default values from JSON schema for any object type. 
:param object_type: The object type to get defaults for (e.g., "operator", "dag") :return: Dictionary of field name -> default value """ # Load schema if needed (handles lazy loading) schema_loader = cls._json_schema if schema_loader is None: return {} # Access the schema definitions (trigger lazy loading) schema_data = schema_loader.schema object_def = schema_data.get("definitions", {}).get(object_type, {}) properties = object_def.get("properties", {}) defaults = {} for field_name, field_def in properties.items(): if isinstance(field_def, dict) and "default" in field_def: defaults[field_name] = field_def["default"] return defaults
BaseSerialization
python
huggingface__transformers
tests/quantization/quanto_integration/test_quanto.py
{ "start": 1475, "end": 4367 }
class ____(unittest.TestCase):
    model_id = "facebook/opt-350m"

    def setUp(self):
        config = AutoConfig.from_pretrained(self.model_id)
        with init_empty_weights():
            self.model = AutoModelForCausalLM.from_config(config)
        self.nb_linear = 0
        self.nb_layernorm = 0
        for module in self.model.modules():
            if isinstance(module, torch.nn.Linear):
                self.nb_linear += 1
            elif isinstance(module, torch.nn.LayerNorm):
                self.nb_layernorm += 1

    def test_weight_only_quantization_conversion(self):
        """
        Simple test that checks if the quantized model has been converted properly when using weight only quantization
        """
        # Try with weight only quantization
        quantization_config = QuantoConfig(weights="int8", activations=None)
        self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config)

        nb_qlinear = 0
        for module in self.model.modules():
            if isinstance(module, QLinear):
                nb_qlinear += 1

        self.assertEqual(self.nb_linear, nb_qlinear)

    def test_weight_and_activation_quantization_conversion(self):
        """
        Simple test that checks if the quantized model has been converted properly when using weight + activation quantization
        """
        # Try with weight + activation quantization
        quantization_config = QuantoConfig(weights="int8", activations="int8")
        self.model, _ = replace_with_quanto_layers(self.model, quantization_config=quantization_config)

        nb_qlinear = 0
        nb_qlayernorm = 0
        for module in self.model.modules():
            if isinstance(module, QLinear):
                nb_qlinear += 1
            if isinstance(module, QLayerNorm):
                nb_qlayernorm += 1

        self.assertEqual(self.nb_linear, nb_qlinear)
        self.assertEqual(self.nb_layernorm, nb_qlayernorm)

    def test_conversion_with_modules_to_not_convert(self):
        """
        Simple test that checks if the quantized model has been converted properly when specifying modules_to_not_convert argument
        """
        # Try with weight + activation quantization
        quantization_config = QuantoConfig(weights="int8", activations="int8")
        self.model, _ = replace_with_quanto_layers(
            self.model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"]
        )

        nb_qlinear = 0
        nb_qlayernorm = 0
        for module in self.model.modules():
            if isinstance(module, QLinear):
                nb_qlinear += 1
            if isinstance(module, QLayerNorm):
                nb_qlayernorm += 1

        self.assertEqual(self.nb_linear - 1, nb_qlinear)


@slow
@require_torch_accelerator
@require_optimum_quanto
@require_accelerate
QuantoTestIntegration
python
pypa__pipenv
pipenv/patched/pip/_internal/models/selection_prefs.py
{ "start": 216, "end": 2030 }
class ____: """ Encapsulates the candidate selection preferences for downloading and installing files. """ __slots__ = [ "allow_yanked", "allow_all_prereleases", "format_control", "prefer_binary", "ignore_requires_python", ] # Don't include an allow_yanked default value to make sure each call # site considers whether yanked releases are allowed. This also causes # that decision to be made explicit in the calling code, which helps # people when reading the code. def __init__( self, allow_yanked: bool, allow_all_prereleases: bool = False, format_control: Optional[FormatControl] = None, prefer_binary: bool = False, ignore_requires_python: Optional[bool] = None, ) -> None: """Create a SelectionPreferences object. :param allow_yanked: Whether files marked as yanked (in the sense of PEP 592) are permitted to be candidates for install. :param format_control: A FormatControl object or None. Used to control the selection of source packages / binary packages when consulting the index and links. :param prefer_binary: Whether to prefer an old, but valid, binary dist over a new source dist. :param ignore_requires_python: Whether to ignore incompatible "Requires-Python" values in links. Defaults to False. """ if ignore_requires_python is None: ignore_requires_python = False self.allow_yanked = allow_yanked self.allow_all_prereleases = allow_all_prereleases self.format_control = format_control self.prefer_binary = prefer_binary self.ignore_requires_python = ignore_requires_python
SelectionPreferences
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 43475, "end": 45622 }
class ____(rv_continuous): r"""A Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `cauchy` is .. math:: f(x) = \frac{1}{\pi (1 + x^2)} for a real number :math:`x`. This distribution uses routines from the Boost Math C++ library for the computation of the ``ppf`` and ``isf`` methods. [1]_ %(after_notes)s References ---------- .. [1] The Boost Developers. "Boost C++ Libraries". https://www.boost.org/. %(example)s """ def _shape_info(self): return [] def _pdf(self, x): # cauchy.pdf(x) = 1 / (pi * (1 + x**2)) with np.errstate(over='ignore'): return 1.0/np.pi/(1.0+x*x) def _logpdf(self, x): # The formulas # log(1/(pi*(1 + x**2))) = -log(pi) - log(1 + x**2) # = -log(pi) - log(x**2*(1 + 1/x**2)) # = -log(pi) - (2log(|x|) + log1p(1/x**2)) # are used here. absx = np.abs(x) # In the following apply_where, `f1` provides better precision than `f2` # for small and moderate x, while `f2` avoids the overflow that can # occur with absx**2. return xpx.apply_where( absx < 1, absx, lambda absx: -_LOG_PI - np.log1p(absx**2), lambda absx: (-_LOG_PI - (2*np.log(absx) + np.log1p((1/absx)**2)))) def _cdf(self, x): return np.arctan2(1, -x)/np.pi def _ppf(self, q): return scu._cauchy_ppf(q, 0, 1) def _sf(self, x): return np.arctan2(1, x)/np.pi def _isf(self, q): return scu._cauchy_isf(q, 0, 1) def _stats(self): return np.nan, np.nan, np.nan, np.nan def _entropy(self): return np.log(4*np.pi) def _fitstart(self, data, args=None): # Initialize ML guesses using quartiles instead of moments. if isinstance(data, CensoredData): data = data._uncensor() p25, p50, p75 = np.percentile(data, [25, 50, 75]) return p50, (p75 - p25)/2 cauchy = cauchy_gen(name='cauchy')
cauchy_gen
python
encode__django-rest-framework
tests/test_model_serializer.py
{ "start": 50842, "end": 51005 }
class ____(models.Model): """Model without .objects manager.""" name = models.CharField(max_length=64) all_objects = models.Manager()
Issue6110TestModel
python
oauthlib__oauthlib
tests/oauth1/rfc5849/test_request_validator.py
{ "start": 108, "end": 3175 }
class ____(TestCase): def test_not_implemented(self): v = RequestValidator() self.assertRaises(NotImplementedError, v.get_client_secret, None, None) self.assertRaises(NotImplementedError, v.get_request_token_secret, None, None, None) self.assertRaises(NotImplementedError, v.get_access_token_secret, None, None, None) self.assertRaises(NotImplementedError, lambda: v.dummy_client) self.assertRaises(NotImplementedError, lambda: v.dummy_request_token) self.assertRaises(NotImplementedError, lambda: v.dummy_access_token) self.assertRaises(NotImplementedError, v.get_rsa_key, None, None) self.assertRaises(NotImplementedError, v.get_default_realms, None, None) self.assertRaises(NotImplementedError, v.get_realms, None, None) self.assertRaises(NotImplementedError, v.get_redirect_uri, None, None) self.assertRaises(NotImplementedError, v.validate_client_key, None, None) self.assertRaises(NotImplementedError, v.validate_access_token, None, None, None) self.assertRaises(NotImplementedError, v.validate_request_token, None, None, None) self.assertRaises(NotImplementedError, v.verify_request_token, None, None) self.assertRaises(NotImplementedError, v.verify_realms, None, None, None) self.assertRaises(NotImplementedError, v.validate_timestamp_and_nonce, None, None, None, None) self.assertRaises(NotImplementedError, v.validate_redirect_uri, None, None, None) self.assertRaises(NotImplementedError, v.validate_realms, None, None, None, None, None) self.assertRaises(NotImplementedError, v.validate_requested_realms, None, None, None) self.assertRaises(NotImplementedError, v.validate_verifier, None, None, None, None) self.assertRaises(NotImplementedError, v.save_access_token, None, None) self.assertRaises(NotImplementedError, v.save_request_token, None, None) self.assertRaises(NotImplementedError, v.save_verifier, None, None, None) def test_check_length(self): v = RequestValidator() for method in (v.check_client_key, v.check_request_token, v.check_access_token, v.check_nonce, v.check_verifier): for not_valid in ('tooshort', 'invalid?characters!', 'thisclientkeyisalittlebittoolong'): self.assertFalse(method(not_valid)) for valid in ('itsjustaboutlongenough',): self.assertTrue(method(valid)) def test_check_realms(self): v = RequestValidator() self.assertFalse(v.check_realms(['foo'])) class FooRealmValidator(RequestValidator): @property def realms(self): return ['foo'] v = FooRealmValidator() self.assertTrue(v.check_realms(['foo']))
RequestValidatorTests
python
pytorch__pytorch
torch/_inductor/runtime/benchmarking.py
{ "start": 3366, "end": 9523 }
class ____: """ A device-agnostic benchmarking utility for measuring the runtime of inductor generated callables. """ def __init__(self: Self) -> None: pass def infer_device(self, *fn_args: Any, **fn_kwargs: Any) -> torch.device: inferred_device: Optional[torch.device] = None for arg_or_kwarg in chain(fn_args, fn_kwargs.values()): # Some callables take nested structures as arguments so use the # flattened form to find any tensors for arg_or_kwarg_leaf in pytree.tree_leaves(arg_or_kwarg): if not isinstance(arg_or_kwarg_leaf, torch.Tensor): continue if inferred_device is None: inferred_device = arg_or_kwarg_leaf.device elif arg_or_kwarg_leaf.device != inferred_device: raise ValueError( "Can't safely infer the device type of `fn` with multiple device types in `fn_args` and `fn_kwargs`!" ) if inferred_device is None: raise ValueError( "Can't safely infer the device type of `fn` with no device types" " in `fn_args` or `fn_kwargs`. Use a direct benchmarking method instead e.g. " "`Benchmarker.benchmark_cpu` or `Benchmarker.benchmark_gpu`." ) return inferred_device @time_and_count def benchmark( self: Self, fn: Callable[..., Any], fn_args: Optional[tuple[Any, ...]] = None, fn_kwargs: Optional[dict[str, Any]] = None, device: Optional[Union[str, torch.device]] = None, **kwargs: Any, ) -> float: """Benchmark `fn(*fn_args, *fn_kwargs)` and return the runtime, in milliseconds (the actual runtime calculation is dictated by the benchmarking implementation, but may be one of [mean, median, minimum, etc.]). Functions as a convenience wrapper around device-specific implementations, like `benchmark_cpu` and `benchmark_gpu`. Raises `ValueError(...)` if we can't safely infer the device type of `fn`; for example, if multiple device types are found in `fn_args` and `fn_kwargs`, or if no device types are found. To bypass device inference, provide the device to the `device` parameter. WARNING: if `fn` mutates `fn_args` or `fn_kwargs`, benchmarking may fail unexpectedly. For example, if `fn` clears a mutable object, subsequent invocations of `fn` during benchmarking will fail. In such cases, `fn` should handle cloning its arguments internally. If device inference is required, `Benchmarker.infer_device` can be used prior to calling this method without any arguments for `fn_args` and `fn_kwargs`. Arguments: - fn: The function to benchmark. - fn_args: The function's arguments. - fn_kwargs: The function's kwargs. Keyword Arguments: - device: Which device to use for benchmarking. If not provided the device will be attempted to be inferred from `fn_args` and `fn_kwargs`. - **kwargs: The benchmarking implementation's kwargs. Returns: - The runtime of `fn(*fn_args, **fn_kwargs)`, in milliseconds. """ inferred_device: Optional[torch.device] = None if device is not None: inferred_device = ( torch.device(device) if isinstance(device, str) else device ) else: if fn_args is None and fn_kwargs is None: raise ValueError( "`fn_args` and `fn_kwargs` cannot both be None if `device` is not provided." ) fn_args = fn_args or tuple() fn_kwargs = fn_kwargs or {} inferred_device = self.infer_device(*fn_args, **fn_kwargs) assert isinstance(inferred_device, torch.device) fn_args = fn_args or tuple() fn_kwargs = fn_kwargs or {} # No need to wrap if the callable takes no arguments if len(fn_args) == 0 and len(fn_kwargs) == 0: _callable = fn else: _callable = lambda: fn(*fn_args, **fn_kwargs) # noqa: E731 # Surfacing all kernels during autotuning is super noisy; filtering these out. 
with DebugMode._benchmarking_inductor(): if inferred_device == torch.device("cpu"): return self.benchmark_cpu(_callable, **kwargs) # TODO(nmacchioni): For non-CPU functions we default to using the GPU-specific benchmarking # implementation which was written specifically with CUDA devices in mind, we may want to # explore alternate implementations for other device types. return self.benchmark_gpu(_callable, **kwargs) @time_and_count def benchmark_cpu( self: Self, _callable: Callable[[], Any], warmup: int = 20, rep: int = 100 ) -> float: """Benchmark the CPU callable, `_callable`, and return the median runtime, in milliseconds. Arguments: - _callable: The CPU callable to benchmark. Keyword Arguments: - warmup: Optionally, the duration, in milliseconds, to run `_callable` before benchmarking starts. - rep: Optionally, the duration, in milliseconds, to run `_callable` during benchmarking. Returns: - The median runtime of `_callable`, in milliseconds. """ def run_for(ms: int) -> list[float]: timings = [] run_start_t = time.perf_counter() while True: start_t = time.perf_counter() _callable() end_t = time.perf_counter() timings.append((end_t - start_t) * MILLISECONDS_PER_SECOND) if ((end_t - run_start_t) * MILLISECONDS_PER_SECOND) > ms: break return timings run_for(warmup) return median(run_for(rep)) @time_and_count def benchmark_gpu(self: Self, *args: Any, **kwargs: Any) -> float: raise NotImplementedError
Benchmarker
python
Lightning-AI__lightning
examples/pytorch/domain_templates/reinforce_learn_ppo.py
{ "start": 1873, "end": 2868 }
class ____(nn.Module): """Policy network, for discrete action spaces, which returns a distribution and an action given an observation.""" def __init__(self, actor_net): """ Args: input_shape: observation shape of the environment n_actions: number of discrete actions available in the environment """ super().__init__() self.actor_net = actor_net def forward(self, states): logits = self.actor_net(states) pi = Categorical(logits=logits) actions = pi.sample() return pi, actions def get_log_prob(self, pi: Categorical, actions: torch.Tensor): """Takes in a distribution and actions and returns log prob of actions under the distribution. Args: pi: torch distribution actions: actions taken by distribution Returns: log probability of the action under pi """ return pi.log_prob(actions)
ActorCategorical
python
getsentry__sentry
tests/sentry/api/endpoints/test_event_attachment_details.py
{ "start": 8713, "end": 11479 }
class ____(PermissionTestCase, CreateAttachmentMixin): def setUp(self) -> None: super().setUp() self.create_attachment() self.path = f"/api/0/projects/{self.organization.slug}/{self.project.slug}/events/{self.event.event_id}/attachments/{self.attachment.id}/?download" @with_feature("organizations:event-attachments") def test_member_can_access_by_default(self) -> None: close_streaming_response(self.assert_member_can_access(self.path)) close_streaming_response(self.assert_can_access(self.owner, self.path)) @with_feature("organizations:event-attachments") def test_member_cannot_access_for_owner_role(self) -> None: self.organization.update_option("sentry:attachments_role", "owner") self.assert_member_cannot_access(self.path) close_streaming_response(self.assert_can_access(self.owner, self.path)) @with_feature("organizations:event-attachments") def test_random_user_cannot_access(self) -> None: self.organization.update_option("sentry:attachments_role", "owner") user = self.create_user() self.assert_cannot_access(user, self.path) @with_feature("organizations:event-attachments") def test_superuser_can_access(self) -> None: self.organization.update_option("sentry:attachments_role", "owner") superuser = self.create_user(is_superuser=True) close_streaming_response(self.assert_can_access(superuser, self.path)) with self.settings(SENTRY_SELF_HOSTED=False): self.assert_can_access(superuser, self.path) self.assert_can_access(superuser, self.path, method="DELETE") @with_feature("organizations:event-attachments") @override_options({"superuser.read-write.ga-rollout": True}) @override_settings(SENTRY_SELF_HOSTED=False) def test_superuser_read_access(self) -> None: self.organization.update_option("sentry:attachments_role", "owner") superuser = self.create_user(is_superuser=True) close_streaming_response(self.assert_can_access(superuser, self.path)) self.assert_cannot_access(superuser, self.path, method="DELETE") @with_feature("organizations:event-attachments") @override_options({"superuser.read-write.ga-rollout": True}) @override_settings(SENTRY_SELF_HOSTED=False) def test_superuser_write_can_access(self) -> None: self.organization.update_option("sentry:attachments_role", "owner") superuser = self.create_user(is_superuser=True) self.add_user_permission(superuser, "superuser.write") close_streaming_response(self.assert_can_access(superuser, self.path)) self.assert_can_access(superuser, self.path, method="DELETE")
EventAttachmentDetailsPermissionTest
python
pytest-dev__pytest
src/_pytest/main.py
{ "start": 15260, "end": 15650 }
class ____:
    def __init__(
        self,
        pm: PytestPluginManager,
        remove_mods: AbstractSet[object],
    ) -> None:
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name: str) -> pluggy.HookCaller:
        x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        self.__dict__[name] = x
        return x
FSHookProxy