language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
jd__tenacity
tenacity/retry.py
{ "start": 3428, "end": 4377 }
class ____(retry_if_exception): """Retries until an exception is raised of one or more types.""" def __init__( self, exception_types: typing.Union[ typing.Type[BaseException], typing.Tuple[typing.Type[BaseException], ...], ] = Exception, ) -> None: self.exception_types = exception_types super().__init__(lambda e: not isinstance(e, exception_types)) def __call__(self, retry_state: "RetryCallState") -> bool: if retry_state.outcome is None: raise RuntimeError("__call__() called before outcome was set") # always retry if no exception was raised if not retry_state.outcome.failed: return True exception = retry_state.outcome.exception() if exception is None: raise RuntimeError("outcome failed but the exception is None") return self.predicate(exception)
retry_unless_exception_type
python
django__django
tests/admin_inlines/models.py
{ "start": 6402, "end": 6606 }
class ____(models.Model): name = models.CharField(max_length=100, help_text="Help text for Consigliere") capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name="+")
Consigliere
python
readthedocs__readthedocs.org
readthedocs/storage/mixins.py
{ "start": 746, "end": 1029 }
class ____: """Make the bucket private and use auth querystring.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.bucket_acl = "private" self.default_acl = "private" self.querystring_auth = True
S3PrivateBucketMixin
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py
{ "start": 2933, "end": 3103 }
class ____(TypedDict): a: float v1 = TD11(a=2) v2: TD12 = v1 # This should generate an error because "a" is writable # and is therefore invariant. v3: TD13 = v1
TD13
python
jd__tenacity
tenacity/asyncio/retry.py
{ "start": 3220, "end": 3731 }
class ____(async_retry_base): """Retries if any of the retries condition is valid.""" def __init__(self, *retries: typing.Union[retry_base, async_retry_base]) -> None: self.retries = retries async def __call__(self, retry_state: "RetryCallState") -> bool: # type: ignore[override] result = False for r in self.retries: result = result or await _utils.wrap_to_async_func(r)(retry_state) if result: break return result
retry_any
python
pytorch__pytorch
test/dynamo/test_modules.py
{ "start": 16103, "end": 16327 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.layer = LazyLayerWithListInput() def forward(self, input): return self.layer(input[:-1])
LazyModuleWithListInput
python
dagster-io__dagster
python_modules/dagster/dagster/_core/system_config/objects.py
{ "start": 2328, "end": 2582 }
class ____(NamedTuple): config: Any @staticmethod def from_dict(config: dict[str, object]) -> "ResourceConfig": check.dict_param(config, "config", key_type=str) return ResourceConfig(config=config.get("config"))
ResourceConfig
python
google__python-fire
fire/test_components_test.py
{ "start": 689, "end": 1230 }
class ____(testutils.BaseTestCase): """Tests to verify that the test components are importable and okay.""" def testTestComponents(self): self.assertIsNotNone(tc.Empty) self.assertIsNotNone(tc.OldStyleEmpty) def testNonComparable(self): with self.assertRaises(ValueError): tc.NonComparable() != 2 # pylint: disable=expression-not-assigned with self.assertRaises(ValueError): tc.NonComparable() == 2 # pylint: disable=expression-not-assigned if __name__ == '__main__': testutils.main()
TestComponentsTest
python
pytorch__pytorch
torch/_inductor/output_code.py
{ "start": 14111, "end": 14894 }
class ____(CompiledFxGraphConstants): """ This version of CompiledFxGraphConstants, instead of grabbing constants directly saved on CompiledFxGraphs, will just grab their names. Then, it takes a second GraphModule to grab the corresponding constant values out of. This is necessary for supporting freezing in FxGraphCache. """ def __init__(self, gm: torch.fx.GraphModule) -> None: self.gm = gm def unwrap(self, g: CompiledFxGraph) -> dict[str, torch.Tensor]: frozen_params = { name: getattr(self.gm, orig_name) for name, orig_name in g.frozen_param_names.items() } constants = g.constants or {} return {**constants, **frozen_params} @dataclasses.dataclass
CompiledFxGraphConstantsWithGm
python
pyqtgraph__pyqtgraph
pyqtgraph/Qt/internals.py
{ "start": 223, "end": 342 }
class ____(ctypes.Structure): _fields_= [('x', ctypes.c_double), ('y', ctypes.c_double), ('c', ctypes.c_int)]
Element
python
numba__llvmlite
llvmlite/ir/instructions.py
{ "start": 24999, "end": 25899 }
class ____(Instruction): def __init__(self, parent, agg, indices, name=''): typ = agg.type try: for i in indices: typ = typ.elements[i] except (AttributeError, IndexError): raise TypeError("Can't index at %r in %s" % (list(indices), agg.type)) super(ExtractValue, self).__init__(parent, typ, "extractvalue", [agg], name=name) self.aggregate = agg self.indices = indices def descr(self, buf): indices = [str(i) for i in self.indices] buf.append("extractvalue {0} {1}, {2} {3}\n".format( self.aggregate.type, self.aggregate.get_reference(), ', '.join(indices), self._stringify_metadata(leading_comma=True), ))
ExtractValue
python
dagster-io__dagster
python_modules/automation/automation_tests/dagster_docs_tests/test_validator.py
{ "start": 3480, "end": 9837 }
class ____: """Test DocstringValidator functionality.""" def test_empty_docstring(self): result = validate_docstring_text("", "test.symbol") assert result.symbol_path == "test.symbol" assert result.has_warnings() assert "No docstring found" in result.warnings[0] assert result.is_valid() # Empty docstring is valid (just warning) def test_simple_valid_docstring(self): docstring = "This is a simple docstring." result = validate_docstring_text(docstring, "test.symbol") assert result.symbol_path == "test.symbol" assert not result.has_errors() assert not result.has_warnings() assert result.is_valid() def test_google_style_docstring(self): docstring = '''"""Function with Google-style docstring. Args: param1: Description of param1 param2: Description of param2 Returns: Description of return value Examples: >>> example_call() 'result' """''' result = validate_docstring_text(docstring, "test.symbol") assert result.symbol_path == "test.symbol" assert not result.has_errors() assert result.is_valid() def test_malformed_section_header(self): docstring = '''"""Function with malformed section header. 
arguments: # Should be "Args:" param1: Description """''' result = validate_docstring_text(docstring, "test.symbol") assert result.symbol_path == "test.symbol" assert result.has_warnings() # The validator may detect RST issues or section header issues warning_text = " ".join(result.warnings).lower() assert "malformed section header" in warning_text or "rst syntax" in warning_text, ( f"Expected section or RST warning, got: {result.warnings}" ) assert result.is_valid() # Warnings don't make it invalid def test_import_symbol_success(self): # Test importing a built-in symbol result = validate_symbol_docstring("builtins.len") assert result.symbol_path == "builtins.len" assert result.is_valid() # len function should have a valid docstring def test_import_symbol_failure(self): # Test importing a non-existent symbol result = validate_symbol_docstring("nonexistent.module.symbol") assert result.symbol_path == "nonexistent.module.symbol" assert result.has_errors() assert "Failed to import symbol" in result.errors[0] assert not result.is_valid() def test_sphinx_role_filtering(self): docstring = '''"""Function using Sphinx roles. See :py:class:`SomeClass` and :func:`some_function`. """''' result = validate_docstring_text(docstring, "test.symbol") # Should not have errors because Sphinx roles are filtered out assert not result.has_errors() assert result.is_valid() def test_no_false_positive_for_words_ending_with_period(self): """Test that words ending with period (like 'returned.') don't trigger section header warnings.""" docstring = '''"""Function that explains return behavior. Args: output_required: Whether the function will always materialize an asset. If False, the function can conditionally not yield a result. Note that you must use yield rather than return. return will not respect this setting and will always produce an asset materialization, even if None is returned. 
"""''' result = validate_docstring_text(docstring, "test.symbol") # Should not have warnings/errors about 'returned.' being a malformed section header # Check that no warnings contain section header related messages section_header_warnings = [ w for w in result.warnings if "malformed section header" in w.lower() or "possible malformed section header" in w.lower() ] section_header_errors = [ e for e in result.errors if "malformed section header" in e.lower() or "possible malformed section header" in e.lower() ] assert not section_header_warnings, ( f"Unexpected section header warnings: {section_header_warnings}" ) assert not section_header_errors, ( f"Unexpected section header errors: {section_header_errors}" ) assert result.is_valid() def test_validates_fix_for_dagster_asset_specific_case(self): """Test the specific case from dagster.asset that was causing the false positive.""" # This is the exact text pattern that was causing the issue docstring = '''"""Function with similar pattern to dagster.asset. Args: output_required: Whether the function will always materialize an asset. Defaults to True. If False, the function can conditionally not yield a result. If no result is yielded, no output will be materialized to storage and downstream assets will not be materialized. Note that for output_required to work at all, you must use yield in your asset logic rather than return. return will not respect this setting and will always produce an asset materialization, even if None is returned. """''' result = validate_docstring_text(docstring, "test.symbol") # The specific fix: should not flag "returned." as a malformed section header section_header_warnings = [ w for w in result.warnings if "malformed section header" in w.lower() and "returned" in w.lower() ] section_header_errors = [ e for e in result.errors if "malformed section header" in e.lower() and "returned" in e.lower() ] assert not section_header_warnings, ( f"Should not flag 'returned.' 
as malformed: {section_header_warnings}" ) assert not section_header_errors, ( f"Should not flag 'returned.' as malformed: {section_header_errors}" ) assert result.is_valid()
TestDocstringValidator
python
ray-project__ray
python/ray/serve/config.py
{ "start": 6103, "end": 13104 }
class ____(BaseModel): """Config for the Serve request router. This class configures how Ray Serve routes requests to deployment replicas. The router is responsible for selecting which replica should handle each incoming request based on the configured routing policy. You can customize the routing behavior by specifying a custom request router class and providing configuration parameters. The router also manages periodic health checks and scheduling statistics collection from replicas to make informed routing decisions. Example: .. code-block:: python from ray.serve.config import RequestRouterConfig, DeploymentConfig from ray import serve # Use default router with custom stats collection interval request_router_config = RequestRouterConfig( request_routing_stats_period_s=5.0, request_routing_stats_timeout_s=15.0 ) # Use custom router class request_router_config = RequestRouterConfig( request_router_class="ray.serve.llm.request_router.PrefixCacheAffinityRouter", request_router_kwargs={"imbalanced_threshold": 20} ) deployment_config = DeploymentConfig( request_router_config=request_router_config ) deployment = serve.deploy( "my_deployment", deployment_config=deployment_config ) """ _serialized_request_router_cls: bytes = PrivateAttr(default=b"") request_router_class: Union[str, Callable] = Field( default=DEFAULT_REQUEST_ROUTER_PATH, description=( "The class of the request router that Ray Serve uses for this deployment. This value can be " "a string or a class. All the deployment handles that you create for this " "deployment use the routing policy defined by the request router. " "Default to Serve's PowerOfTwoChoicesRequestRouter." ), ) request_router_kwargs: Dict[str, Any] = Field( default_factory=dict, description=( "Keyword arguments that Ray Serve passes to the request router class " "initialize_state method." 
), ) request_routing_stats_period_s: PositiveFloat = Field( default=DEFAULT_REQUEST_ROUTING_STATS_PERIOD_S, description=( "Duration between record scheduling stats calls for the replica. " "Defaults to 10s. The health check is by default a no-op Actor call " "to the replica, but you can define your own request scheduling stats " "using the 'record_scheduling_stats' method in your deployment." ), ) request_routing_stats_timeout_s: PositiveFloat = Field( default=DEFAULT_REQUEST_ROUTING_STATS_TIMEOUT_S, description=( "Duration in seconds, that replicas wait for a request scheduling " "stats method to return before considering it as failed. Defaults to 30s." ), ) @validator("request_router_kwargs", always=True) def request_router_kwargs_json_serializable(cls, v): if isinstance(v, bytes): return v if v is not None: try: json.dumps(v) except TypeError as e: raise ValueError( f"request_router_kwargs is not JSON-serializable: {str(e)}." ) return v def __init__(self, **kwargs: dict[str, Any]): """Initialize RequestRouterConfig with the given parameters. Needed to serialize the request router class since validators are not called for attributes that begin with an underscore. Args: **kwargs: Keyword arguments to pass to BaseModel. 
""" serialized_request_router_cls = kwargs.pop( "_serialized_request_router_cls", None ) super().__init__(**kwargs) if serialized_request_router_cls: self._serialized_request_router_cls = serialized_request_router_cls else: self._serialize_request_router_cls() def set_serialized_request_router_cls( self, serialized_request_router_cls: bytes ) -> None: self._serialized_request_router_cls = serialized_request_router_cls @classmethod def from_serialized_request_router_cls( cls, request_router_config: dict, serialized_request_router_cls: bytes ) -> "RequestRouterConfig": config = request_router_config.copy() config["_serialized_request_router_cls"] = serialized_request_router_cls return cls(**config) def get_serialized_request_router_cls(self) -> Optional[bytes]: return self._serialized_request_router_cls def _serialize_request_router_cls(self) -> None: """Import and serialize request router class with cloudpickle. Import the request router if you pass it in as a string import path. Then cloudpickle the request router and set to `_serialized_request_router_cls`. 
""" request_router_class = self.request_router_class if isinstance(request_router_class, Callable): request_router_class = ( f"{request_router_class.__module__}.{request_router_class.__name__}" ) request_router_path = request_router_class or DEFAULT_REQUEST_ROUTER_PATH request_router_module, request_router_class = import_module_and_attr( request_router_path ) cloudpickle.register_pickle_by_value(request_router_module) self.set_serialized_request_router_cls(cloudpickle.dumps(request_router_class)) cloudpickle.unregister_pickle_by_value(request_router_module) # Update the request_router_class field to be the string path self.request_router_class = request_router_path def get_request_router_class(self) -> Callable: """Deserialize the request router from cloudpickled bytes.""" try: return cloudpickle.loads(self._serialized_request_router_cls) except (ModuleNotFoundError, ImportError) as e: raise ImportError( f"Failed to deserialize custom request router: {e}\n\n" "This typically happens when the router depends on external modules " "that aren't available in the current environment. To fix this:\n" " - Ensure all dependencies are installed in your Docker image or environment\n" " - Package your router as a Python package and install it\n" " - Place the router module in PYTHONPATH\n\n" "For more details, see: https://docs.ray.io/en/latest/serve/advanced-guides/" "custom-request-router.html#gotchas-and-limitations" ) from e DEFAULT_METRICS_INTERVAL_S = 10.0 @PublicAPI(stability="alpha")
RequestRouterConfig
python
getsentry__sentry
src/sentry/deletions/defaults/organizationmember.py
{ "start": 132, "end": 365 }
class ____(ModelDeletionTask[OrganizationMember]): def get_child_relations(self, instance: OrganizationMember) -> list[BaseRelation]: relations: list[BaseRelation] = [] return relations
OrganizationMemberDeletionTask
python
getsentry__sentry-python
sentry_sdk/integrations/rust_tracing.py
{ "start": 4050, "end": 7868 }
class ____: def __init__( self, origin: str, event_type_mapping: Callable[ [Dict[str, Any]], EventTypeMapping ] = default_event_type_mapping, span_filter: Callable[[Dict[str, Any]], bool] = default_span_filter, include_tracing_fields: Optional[bool] = None, ): self.origin = origin self.event_type_mapping = event_type_mapping self.span_filter = span_filter self.include_tracing_fields = include_tracing_fields def _include_tracing_fields(self) -> bool: """ By default, the values of tracing fields are not included in case they contain PII. A user may override that by passing `True` for the `include_tracing_fields` keyword argument of this integration or by setting `send_default_pii` to `True` in their Sentry client options. """ return ( should_send_default_pii() if self.include_tracing_fields is None else self.include_tracing_fields ) def on_event(self, event: str, _span_state: TraceState) -> None: deserialized_event = json.loads(event) metadata = deserialized_event.get("metadata", {}) event_type = self.event_type_mapping(metadata) if event_type == EventTypeMapping.Ignore: return elif event_type == EventTypeMapping.Exc: process_exception(deserialized_event) elif event_type == EventTypeMapping.Breadcrumb: process_breadcrumb(deserialized_event) elif event_type == EventTypeMapping.Event: process_event(deserialized_event) def on_new_span(self, attrs: str, span_id: str) -> TraceState: attrs = json.loads(attrs) metadata = attrs.get("metadata", {}) if not self.span_filter(metadata): return None module_path = metadata.get("module_path") name = metadata.get("name") message = attrs.get("message") if message is not None: sentry_span_name = message elif module_path is not None and name is not None: sentry_span_name = f"{module_path}::{name}" # noqa: E231 elif name is not None: sentry_span_name = name else: sentry_span_name = "<unknown>" kwargs = { "op": "function", "name": sentry_span_name, "origin": self.origin, } scope = sentry_sdk.get_current_scope() parent_sentry_span = 
scope.span if parent_sentry_span: sentry_span = parent_sentry_span.start_child(**kwargs) else: sentry_span = scope.start_span(**kwargs) fields = metadata.get("fields", []) for field in fields: if self._include_tracing_fields(): sentry_span.set_data(field, attrs.get(field)) else: sentry_span.set_data(field, SENSITIVE_DATA_SUBSTITUTE) scope.span = sentry_span return (parent_sentry_span, sentry_span) def on_close(self, span_id: str, span_state: TraceState) -> None: if span_state is None: return parent_sentry_span, sentry_span = span_state sentry_span.finish() sentry_sdk.get_current_scope().span = parent_sentry_span def on_record(self, span_id: str, values: str, span_state: TraceState) -> None: if span_state is None: return _parent_sentry_span, sentry_span = span_state deserialized_values = json.loads(values) for key, value in deserialized_values.items(): if self._include_tracing_fields(): sentry_span.set_data(key, value) else: sentry_span.set_data(key, SENSITIVE_DATA_SUBSTITUTE)
RustTracingLayer
python
sqlalchemy__sqlalchemy
test/sql/test_update.py
{ "start": 3775, "end": 32544 }
class ____(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = "default_enhanced" @testing.variation("twotable", [True, False]) @testing.variation("values", ["none", "blank"]) def test_update_no_params(self, values, twotable): """test issue identified while doing #9721 UPDATE with empty VALUES but multiple tables would raise a NoneType error; fixed this to emit an empty "SET" the way a single table UPDATE currently does. both cases should probably raise CompileError, however this could be backwards incompatible with current use cases (such as other test suites) """ table1 = self.tables.mytable table2 = self.tables.myothertable stmt = table1.update().where(table1.c.name == "jill") if twotable: stmt = stmt.where(table2.c.otherid == table1.c.myid) if values.blank: stmt = stmt.values() if twotable: if values.blank: self.assert_compile( stmt, "UPDATE mytable SET FROM myothertable " "WHERE mytable.name = :name_1 " "AND myothertable.otherid = mytable.myid", ) elif values.none: self.assert_compile( stmt, "UPDATE mytable SET myid=:myid, name=:name, " "description=:description FROM myothertable " "WHERE mytable.name = :name_1 " "AND myothertable.otherid = mytable.myid", ) elif values.blank: self.assert_compile( stmt, "UPDATE mytable SET WHERE mytable.name = :name_1", ) elif values.none: self.assert_compile( stmt, "UPDATE mytable SET myid=:myid, name=:name, " "description=:description WHERE mytable.name = :name_1", ) def test_update_literal_binds(self): table1 = self.tables.mytable stmt = ( table1.update().values(name="jack").where(table1.c.name == "jill") ) self.assert_compile( stmt, "UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'", literal_binds=True, ) def test_update_custom_key_thing(self): table1 = self.tables.mytable class Thing: def __clause_element__(self): return table1.c.name stmt = ( table1.update() .values({Thing(): "jack"}) .where(table1.c.name == "jill") ) self.assert_compile( stmt, "UPDATE mytable SET name='jack' WHERE 
mytable.name = 'jill'", literal_binds=True, ) def test_update_ordered_custom_key_thing(self): table1 = self.tables.mytable class Thing: def __clause_element__(self): return table1.c.name stmt = ( table1.update() .ordered_values((Thing(), "jack")) .where(table1.c.name == "jill") ) self.assert_compile( stmt, "UPDATE mytable SET name='jack' WHERE mytable.name = 'jill'", literal_binds=True, ) def test_update_broken_custom_key_thing(self): table1 = self.tables.mytable class Thing: def __clause_element__(self): return 5 assert_raises_message( exc.ArgumentError, "SET/VALUES column expression or string key expected, got .*Thing", table1.update().values, {Thing(): "jack"}, ) def test_update_ordered_broken_custom_key_thing(self): table1 = self.tables.mytable class Thing: def __clause_element__(self): return 5 assert_raises_message( exc.ArgumentError, "SET/VALUES column expression or string key expected, got .*Thing", table1.update().ordered_values, (Thing(), "jack"), ) def test_correlated_update_one(self): table1 = self.tables.mytable # test against a straight text subquery u = update(table1).values( { table1.c.name: text( "(select name from mytable where id=mytable.id)" ) } ) self.assert_compile( u, "UPDATE mytable SET name=(select name from mytable " "where id=mytable.id)", ) def test_correlated_update_two(self): table1 = self.tables.mytable mt = table1.alias() u = update(table1).values( { table1.c.name: select(mt.c.name) .where(mt.c.myid == table1.c.myid) .scalar_subquery() } ) self.assert_compile( u, "UPDATE mytable SET name=(SELECT mytable_1.name FROM " "mytable AS mytable_1 WHERE " "mytable_1.myid = mytable.myid)", ) def test_correlated_update_three(self): table1 = self.tables.mytable table2 = self.tables.myothertable # test against a regular constructed subquery s = ( select(table2) .where(table2.c.otherid == table1.c.myid) .scalar_subquery() ) u = ( update(table1) .where(table1.c.name == "jack") .values({table1.c.name: s}) ) self.assert_compile( u, "UPDATE mytable 
SET name=(SELECT myothertable.otherid, " "myothertable.othername FROM myothertable WHERE " "myothertable.otherid = mytable.myid) " "WHERE mytable.name = :name_1", ) def test_correlated_update_four(self): table1 = self.tables.mytable table2 = self.tables.myothertable # test a non-correlated WHERE clause s = select(table2.c.othername).where(table2.c.otherid == 7) u = update(table1).where(table1.c.name == s.scalar_subquery()) self.assert_compile( u, "UPDATE mytable SET myid=:myid, name=:name, " "description=:description WHERE mytable.name = " "(SELECT myothertable.othername FROM myothertable " "WHERE myothertable.otherid = :otherid_1)", ) def test_correlated_update_five(self): table1 = self.tables.mytable table2 = self.tables.myothertable # test one that is actually correlated... s = select(table2.c.othername).where(table2.c.otherid == table1.c.myid) u = table1.update().where(table1.c.name == s.scalar_subquery()) self.assert_compile( u, "UPDATE mytable SET myid=:myid, name=:name, " "description=:description WHERE mytable.name = " "(SELECT myothertable.othername FROM myothertable " "WHERE myothertable.otherid = mytable.myid)", ) def test_correlated_update_six(self): table1 = self.tables.mytable table2 = self.tables.myothertable # test correlated FROM implicit in WHERE and SET clauses u = ( table1.update() .values(name=table2.c.othername) .where(table2.c.otherid == table1.c.myid) ) self.assert_compile( u, "UPDATE mytable SET name=myothertable.othername " "FROM myothertable WHERE myothertable.otherid = mytable.myid", ) def test_correlated_update_seven(self): table1 = self.tables.mytable table2 = self.tables.myothertable u = ( table1.update() .values(name="foo") .where(table2.c.otherid == table1.c.myid) ) # this is the "default_enhanced" compiler. there's no UPDATE FROM # in the base compiler. # See also test/dialect/mssql/test_compiler->test_update_from(). 
self.assert_compile( u, "UPDATE mytable SET name=:name " "FROM myothertable WHERE myothertable.otherid = mytable.myid", ) def test_binds_that_match_columns(self): """test bind params named after column names replace the normal SET/VALUES generation. See also test_compiler.py::CrudParamOverlapTest """ t = table("foo", column("x"), column("y")) u = t.update().where(t.c.x == bindparam("x")) assert_raises(exc.CompileError, u.compile) self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={}) assert_raises(exc.CompileError, u.values(x=7).compile) self.assert_compile( u.values(y=7), "UPDATE foo SET y=:y WHERE foo.x = :x" ) assert_raises( exc.CompileError, u.values(x=7).compile, column_keys=["x", "y"] ) assert_raises(exc.CompileError, u.compile, column_keys=["x", "y"]) self.assert_compile( u.values(x=3 + bindparam("x")), "UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x", ) self.assert_compile( u.values(x=3 + bindparam("x")), "UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x", params={"x": 1}, ) self.assert_compile( u.values(x=3 + bindparam("x")), "UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x", params={"x": 1, "y": 2}, ) def test_labels_no_collision(self): t = table("foo", column("id"), column("foo_id")) self.assert_compile( t.update().where(t.c.id == 5), "UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1", ) self.assert_compile( t.update().where(t.c.id == bindparam(key=t.c.id._label)), "UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1", ) def test_labels_no_collision_index(self): """test for [ticket:4911]""" t = Table( "foo", MetaData(), Column("id", Integer, index=True), Column("foo_id", Integer), ) self.assert_compile( t.update().where(t.c.id == 5), "UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1", ) self.assert_compile( t.update().where(t.c.id == bindparam(key=t.c.id._label)), "UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1", ) def test_inline_defaults(self): m = MetaData() foo = Table("foo", m, 
Column("id", Integer)) t = Table( "test", m, Column("col1", Integer, onupdate=func.foo(1)), Column( "col2", Integer, onupdate=select(func.coalesce(func.max(foo.c.id))), ), Column("col3", String(30)), ) self.assert_compile( t.update().values({"col3": "foo"}), "UPDATE test SET col1=foo(:foo_1), col2=(SELECT " "coalesce(max(foo.id)) AS coalesce_1 FROM foo), " "col3=:col3", ) self.assert_compile( t.update().inline().values({"col3": "foo"}), "UPDATE test SET col1=foo(:foo_1), col2=(SELECT " "coalesce(max(foo.id)) AS coalesce_1 FROM foo), " "col3=:col3", ) def test_update_1(self): table1 = self.tables.mytable self.assert_compile( update(table1).where(table1.c.myid == 7), "UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1", params={table1.c.name: "fred"}, ) def test_update_2(self): table1 = self.tables.mytable self.assert_compile( table1.update() .where(table1.c.myid == 7) .values({table1.c.myid: 5}), "UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1", checkparams={"myid": 5, "myid_1": 7}, ) def test_update_3(self): table1 = self.tables.mytable self.assert_compile( update(table1).where(table1.c.myid == 7), "UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1", params={"name": "fred"}, ) def test_update_4(self): table1 = self.tables.mytable self.assert_compile( update(table1).values({table1.c.name: table1.c.myid}), "UPDATE mytable SET name=mytable.myid", ) def test_update_5(self): table1 = self.tables.mytable self.assert_compile( update(table1) .where(table1.c.name == bindparam("crit")) .values( {table1.c.name: "hi"}, ), "UPDATE mytable SET name=:name WHERE mytable.name = :crit", params={"crit": "notthere"}, checkparams={"crit": "notthere", "name": "hi"}, ) def test_update_6(self): table1 = self.tables.mytable self.assert_compile( update(table1) .where(table1.c.myid == 12) .values( {table1.c.name: table1.c.myid}, ), "UPDATE mytable " "SET name=mytable.myid, description=:description " "WHERE mytable.myid = :myid_1", params={"description": "test"}, 
checkparams={"description": "test", "myid_1": 12}, ) def test_update_7(self): table1 = self.tables.mytable self.assert_compile( update(table1) .where(table1.c.myid == 12) .values({table1.c.myid: 9}), "UPDATE mytable " "SET myid=:myid, description=:description " "WHERE mytable.myid = :myid_1", params={"myid_1": 12, "myid": 9, "description": "test"}, ) def test_update_8(self): table1 = self.tables.mytable self.assert_compile( update(table1).where(table1.c.myid == 12), "UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1", params={"myid": 18}, checkparams={"myid": 18, "myid_1": 12}, ) def test_update_9(self): table1 = self.tables.mytable s = ( table1.update() .where(table1.c.myid == 12) .values({table1.c.name: "lala"}) ) c = s.compile(column_keys=["id", "name"]) eq_(str(s), str(c)) def test_update_10(self): table1 = self.tables.mytable v1 = {table1.c.name: table1.c.myid} v2 = {table1.c.name: table1.c.name + "foo"} self.assert_compile( update(table1).where(table1.c.myid == 12).values(v1).values(v2), "UPDATE mytable " "SET " "name=(mytable.name || :name_1), " "description=:description " "WHERE mytable.myid = :myid_1", params={"description": "test"}, ) def test_update_11(self): table1 = self.tables.mytable values = { table1.c.name: table1.c.name + "lala", table1.c.myid: func.do_stuff(table1.c.myid, literal("hoho")), } self.assert_compile( update(table1) .where( (table1.c.myid == func.hoho(4)) & ( table1.c.name == literal("foo") + table1.c.name + literal("lala") ) ) .values(values), "UPDATE mytable " "SET " "myid=do_stuff(mytable.myid, :param_1), " "name=(mytable.name || :name_1) " "WHERE " "mytable.myid = hoho(:hoho_1) AND " "mytable.name = (:param_2 || mytable.name || :param_3)", ) def test_unconsumed_names_kwargs(self): t = table("t", column("x"), column("y")) assert_raises_message( exc.CompileError, "Unconsumed column names: z", t.update().values(x=5, z=5).compile, ) @testing.variation("include_in_from", [True, False]) @testing.variation("use_mysql", [True, 
False]) def test_unconsumed_names_values_dict(self, include_in_from, use_mysql): t = table("t", column("x"), column("y")) t2 = table("t2", column("q"), column("z")) stmt = t.update().values(x=5, j=7).values({t2.c.z: 5}) if include_in_from: stmt = stmt.where(t.c.x == t2.c.q) if use_mysql: if not include_in_from: msg = ( "Statement is not a multi-table UPDATE statement; cannot " r"include columns from table\(s\) 't2' in SET clause" ) else: msg = "Unconsumed column names: j" else: msg = ( "Backend does not support additional tables in the SET " r"clause; cannot include columns from table\(s\) 't2' in " "SET clause" ) with expect_raises_message(exc.CompileError, msg): if use_mysql: stmt.compile(dialect=mysql.dialect()) else: stmt.compile() def test_unconsumed_names_kwargs_w_keys(self): t = table("t", column("x"), column("y")) assert_raises_message( exc.CompileError, "Unconsumed column names: j", t.update().values(x=5, j=7).compile, column_keys=["j"], ) def test_update_ordered_parameters_newstyle_1(self): table1 = self.tables.mytable # Confirm that we can pass values as list value pairs # note these are ordered *differently* from table.c values = [ (table1.c.name, table1.c.name + "lala"), (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))), ] self.assert_compile( update(table1) .where( (table1.c.myid == func.hoho(4)) & ( table1.c.name == literal("foo") + table1.c.name + literal("lala") ) ) .ordered_values(*values), "UPDATE mytable " "SET " "name=(mytable.name || :name_1), " "myid=do_stuff(mytable.myid, :param_1) " "WHERE " "mytable.myid = hoho(:hoho_1) AND " "mytable.name = (:param_2 || mytable.name || :param_3)", ) def test_update_ordered_parameters_newstyle_2(self): table1 = self.tables.mytable # Confirm that we can pass values as list value pairs # note these are ordered *differently* from table.c values = [ (table1.c.name, table1.c.name + "lala"), ("description", "some desc"), (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))), ] 
self.assert_compile( update(table1) .where( (table1.c.myid == func.hoho(4)) & ( table1.c.name == literal("foo") + table1.c.name + literal("lala") ), ) .ordered_values(*values), "UPDATE mytable " "SET " "name=(mytable.name || :name_1), " "description=:description, " "myid=do_stuff(mytable.myid, :param_1) " "WHERE " "mytable.myid = hoho(:hoho_1) AND " "mytable.name = (:param_2 || mytable.name || :param_3)", ) def test_update_ordered_parameters_multiple(self): table1 = self.tables.mytable stmt = update(table1) stmt = stmt.ordered_values(("name", "somename")) assert_raises_message( exc.ArgumentError, "This statement already has ordered values present", stmt.ordered_values, ("myid", 10), ) def test_update_ordered_then_nonordered(self): table1 = self.tables.mytable stmt = table1.update().ordered_values(("myid", 1), ("name", "d1")) assert_raises_message( exc.InvalidRequestError, "This statement already has ordered values present", stmt.values, {"myid": 2, "name": "d2"}, ) def test_update_no_multiple_parameters_allowed(self): table1 = self.tables.mytable stmt = table1.update().values( [{"myid": 1, "name": "n1"}, {"myid": 2, "name": "n2"}] ) assert_raises_message( exc.InvalidRequestError, "UPDATE construct does not support multiple parameter sets.", stmt.compile, ) def test_update_ordereddict(self): table1 = self.tables.mytable # Confirm that ordered dicts are treated as normal dicts, # columns sorted in table order values = util.OrderedDict( ( (table1.c.name, table1.c.name + "lala"), (table1.c.myid, func.do_stuff(table1.c.myid, literal("hoho"))), ) ) self.assert_compile( update(table1) .where( (table1.c.myid == func.hoho(4)) & ( table1.c.name == literal("foo") + table1.c.name + literal("lala") ), ) .values(values), "UPDATE mytable " "SET " "myid=do_stuff(mytable.myid, :param_1), " "name=(mytable.name || :name_1) " "WHERE " "mytable.myid = hoho(:hoho_1) AND " "mytable.name = (:param_2 || mytable.name || :param_3)", ) def test_where_empty(self): table1 = self.tables.mytable 
self.assert_compile( table1.update().where( BooleanClauseList._construct_raw(operators.and_) ), "UPDATE mytable SET myid=:myid, name=:name, " "description=:description", ) self.assert_compile( table1.update().where( BooleanClauseList._construct_raw(operators.or_) ), "UPDATE mytable SET myid=:myid, name=:name, " "description=:description", ) def test_prefix_with(self): table1 = self.tables.mytable stmt = ( table1.update() .prefix_with("A", "B", dialect="mysql") .prefix_with("C", "D") ) self.assert_compile( stmt, "UPDATE C D mytable SET myid=:myid, name=:name, " "description=:description", ) self.assert_compile( stmt, "UPDATE A B C D mytable SET myid=%s, name=%s, description=%s", dialect=mysql.dialect(), ) def test_update_to_expression_one(self): """test update from an expression. this logic is triggered currently by a left side that doesn't have a key. The current supported use case is updating the index of a PostgreSQL ARRAY type. """ table1 = self.tables.mytable expr = func.foo(table1.c.myid) eq_(expr.key, None) self.assert_compile( table1.update().values({expr: "bar"}), "UPDATE mytable SET foo(myid)=:param_1", ) def random_update_order_parameters(): from sqlalchemy import ARRAY t = table( "foo", column("data1", ARRAY(Integer)), column("data2", ARRAY(Integer)), column("data3", ARRAY(Integer)), column("data4", ARRAY(Integer)), ) idx_to_value = [ (t.c.data1, 5, 7), (t.c.data2, 10, 18), (t.c.data3, 8, 4), (t.c.data4, 12, 14), ] def combinations(): while True: random.shuffle(idx_to_value) yield list(idx_to_value) return testing.combinations( *[ (t, combination) for i, combination in zip(range(10), combinations()) ], argnames="t, idx_to_value", ) @random_update_order_parameters() def test_update_to_expression_two(self, t, idx_to_value): """test update from an expression. this logic is triggered currently by a left side that doesn't have a key. The current supported use case is updating the index of a PostgreSQL ARRAY type. 
""" dialect = default.StrCompileDialect() dialect.paramstyle = "qmark" dialect.positional = True stmt = t.update().ordered_values( *[(col[idx], val) for col, idx, val in idx_to_value] ) self.assert_compile( stmt, "UPDATE foo SET %s" % ( ", ".join( "%s[?]=?" % col.key for col, idx, val in idx_to_value ) ), dialect=dialect, checkpositional=tuple( itertools.chain.from_iterable( (idx, val) for col, idx, val in idx_to_value ) ), ) def test_update_to_expression_three(self): # this test is from test_defaults but exercises a particular # parameter ordering issue metadata = MetaData() q = Table( "q", metadata, Column("x", Integer, default=2), Column("y", Integer, onupdate=5), Column("z", Integer), ) p = Table( "p", metadata, Column("s", Integer), Column("t", Integer), Column("u", Integer, onupdate=1), ) cte = ( q.update().where(q.c.z == 1).values(x=7).returning(q.c.z).cte("c") ) stmt = select(p.c.s, cte.c.z).where(p.c.s == cte.c.z) dialect = default.StrCompileDialect() dialect.paramstyle = "qmark" dialect.positional = True self.assert_compile( stmt, "WITH c AS (UPDATE q SET x=?, y=? WHERE q.z = ? RETURNING q.z) " "SELECT p.s, c.z FROM p, c WHERE p.s = c.z", checkpositional=(7, None, 1), dialect=dialect, ) @testing.variation("paramstyle", ["qmark", "format", "numeric"]) def test_update_bound_ordering(self, paramstyle): """test that bound parameters between the UPDATE and FROM clauses order correctly in different SQL compilation scenarios. """ table1 = self.tables.mytable table2 = self.tables.myothertable sel = select(table2).where(table2.c.otherid == 5).alias() upd = ( table1.update() .where(table1.c.name == sel.c.othername) .values(name="foo") ) if paramstyle.qmark: dialect = default.StrCompileDialect(paramstyle="qmark") self.assert_compile( upd, "UPDATE mytable SET name=? FROM (SELECT " "myothertable.otherid AS otherid, " "myothertable.othername AS othername " "FROM myothertable " "WHERE myothertable.otherid = ?) 
AS anon_1 " "WHERE mytable.name = anon_1.othername", checkpositional=("foo", 5), dialect=dialect, ) elif paramstyle.format: self.assert_compile( upd, "UPDATE mytable, (SELECT myothertable.otherid AS otherid, " "myothertable.othername AS othername " "FROM myothertable " "WHERE myothertable.otherid = %s) AS anon_1 " "SET mytable.name=%s " "WHERE mytable.name = anon_1.othername", checkpositional=(5, "foo"), dialect=mysql.dialect(), ) elif paramstyle.numeric: dialect = default.StrCompileDialect(paramstyle="numeric") self.assert_compile( upd, "UPDATE mytable SET name=:1 FROM (SELECT " "myothertable.otherid AS otherid, " "myothertable.othername AS othername " "FROM myothertable " "WHERE myothertable.otherid = :2) AS anon_1 " "WHERE mytable.name = anon_1.othername", checkpositional=("foo", 5), dialect=dialect, ) else: paramstyle.fail()
UpdateTest
python
walkccc__LeetCode
solutions/2831. Find the Longest Equal Subarray/2831-2.py
{ "start": 0, "end": 386 }
class ____: def longestEqualSubarray(self, nums: list[int], k: int) -> int: ans = 0 count = collections.Counter() # l and r track the maximum window instead of the valid window. l = 0 for r, num in enumerate(nums): count[num] += 1 ans = max(ans, count[num]) if r - l + 1 - k > ans: count[nums[l]] -= 1 l += 1 return ans
Solution
python
celery__celery
t/unit/backends/test_base.py
{ "start": 13907, "end": 38174 }
class ____: def setup_method(self): self.b = DictBackend(app=self.app) @self.app.task(shared=False, bind=True) def bound_errback(self, result): pass @self.app.task(shared=False) def errback(arg1, arg2): errback.last_result = arg1 + arg2 self.bound_errback = bound_errback self.errback = errback def test_delete_group(self): self.b.delete_group('can-delete') assert 'can-delete' not in self.b._data def test_prepare_exception_json(self): x = DictBackend(self.app, serializer='json') e = x.prepare_exception(KeyError('foo')) assert 'exc_type' in e e = x.exception_to_python(e) assert e.__class__.__name__ == 'KeyError' assert str(e).strip('u') == "'foo'" def test_save_group(self): b = BaseBackend(self.app) b._save_group = Mock() b.save_group('foofoo', 'xxx') b._save_group.assert_called_with('foofoo', 'xxx') def test_add_to_chord_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.add_to_chord('group_id', 'sig') def test_forget_interface(self): b = BaseBackend(self.app) with pytest.raises(NotImplementedError): b.forget('foo') def test_restore_group(self): assert self.b.restore_group('missing') is None assert self.b.restore_group('missing') is None assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists') == 'group' assert self.b.restore_group('exists', cache=False) == 'group' def test_reload_group_result(self): self.b._cache = {} self.b.reload_group_result('exists') self.b._cache['exists'] = {'result': 'group'} def test_reload_task_result(self): self.b._cache = {} self.b.reload_task_result('task-exists') self.b._cache['task-exists'] = {'result': 'task'} def test_fail_from_current_stack(self): import inspect self.b.mark_as_failure = Mock() frame_list = [] def raise_dummy(): frame_str_temp = str(inspect.currentframe().__repr__) frame_list.append(frame_str_temp) raise KeyError('foo') try: raise_dummy() except KeyError as exc: self.b.fail_from_current_stack('task_id') self.b.mark_as_failure.assert_called() args = 
self.b.mark_as_failure.call_args[0] assert args[0] == 'task_id' assert args[1] is exc assert args[2] tb_ = exc.__traceback__ while tb_ is not None: if str(tb_.tb_frame.__repr__) == frame_list[0]: assert len(tb_.tb_frame.f_locals) == 0 tb_ = tb_.tb_next def test_prepare_value_serializes_group_result(self): self.b.serializer = 'json' g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) v = self.b.prepare_value(g) assert isinstance(v, (list, tuple)) assert result_from_tuple(v, app=self.app) == g v2 = self.b.prepare_value(g[0]) assert isinstance(v2, (list, tuple)) assert result_from_tuple(v2, app=self.app) == g[0] self.b.serializer = 'pickle' assert isinstance(self.b.prepare_value(g), self.app.GroupResult) def test_is_cached(self): b = BaseBackend(app=self.app, max_cached_results=1) b._cache['foo'] = 1 assert b.is_cached('foo') assert not b.is_cached('false') def test_mark_as_done__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') b.on_chord_part_return = Mock() b.mark_as_done('id', 10, request=request) b.on_chord_part_return.assert_called_with(request, states.SUCCESS, 10) def test_mark_as_failure__bound_errback_eager(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.delivery_info = { 'is_eager': True } request.errbacks = [ self.bound_errback.subtask(args=[1], immutable=True)] exc = KeyError() group = self.patching('celery.backends.base.group') b.mark_as_failure('id', exc, request=request) group.assert_called_with(request.errbacks, app=self.app) group.return_value.apply.assert_called_with( (request.id, ), parent_id=request.id, root_id=request.root_id) def test_mark_as_failure__bound_errback(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.delivery_info = {} request.errbacks = [ self.bound_errback.subtask(args=[1], immutable=True)] exc = KeyError() group = self.patching('celery.backends.base.group') 
b.mark_as_failure('id', exc, request=request) group.assert_called_with(request.errbacks, app=self.app) group.return_value.apply_async.assert_called_with( (request.id, ), parent_id=request.id, root_id=request.root_id) def test_mark_as_failure__errback(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [self.errback.subtask(args=[2, 3], immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) assert self.errback.last_result == 5 @patch('celery.backends.base.group') def test_class_based_task_can_be_used_as_error_callback(self, mock_group): b = BaseBackend(app=self.app) b._store_result = Mock() class TaskBasedClass(Task): def run(self): pass TaskBasedClass = self.app.register_task(TaskBasedClass()) request = Mock(name='request') request.errbacks = [TaskBasedClass.subtask(args=[], immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) mock_group.assert_called_once_with(request.errbacks, app=self.app) @patch('celery.backends.base.group') def test_unregistered_task_can_be_used_as_error_callback(self, mock_group): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [signature('doesnotexist', immutable=True)] exc = KeyError() b.mark_as_failure('id', exc, request=request) mock_group.assert_called_once_with(request.errbacks, app=self.app) def test_mark_as_failure__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() exc = KeyError() b.mark_as_failure('id', exc, request=request) b.on_chord_part_return.assert_called_with(request, states.FAILURE, exc) def test_mark_as_revoked__chord(self): b = BaseBackend(app=self.app) b._store_result = Mock() request = Mock(name='request') request.errbacks = [] b.on_chord_part_return = Mock() b.mark_as_revoked('id', 'revoked', request=request) 
b.on_chord_part_return.assert_called_with(request, states.REVOKED, ANY) def test_chord_error_from_stack_raises(self): class ExpectedException(Exception): pass b = BaseBackend(app=self.app) callback = MagicMock(name='callback') callback.options = {'link_error': []} callback.keys.return_value = [] task = self.app.tasks[callback.task] = Mock() b.fail_from_current_stack = Mock() self.patching('celery.group') with patch.object( b, "_call_task_errbacks", side_effect=ExpectedException() ) as mock_call_errbacks: b.chord_error_from_stack(callback, exc=ValueError()) task.backend.fail_from_current_stack.assert_called_with( callback.id, exc=mock_call_errbacks.side_effect, ) def test_exception_to_python_when_None(self): b = BaseBackend(app=self.app) assert b.exception_to_python(None) is None def test_not_an_actual_exc_info(self): pass def test_not_an_exception_but_a_callable(self): x = { 'exc_message': ('echo 1',), 'exc_type': 'system', 'exc_module': 'os' } with pytest.raises(SecurityError, match=re.escape(r"Expected an exception class, got os.system with payload ('echo 1',)")): self.b.exception_to_python(x) def test_not_an_exception_but_another_object(self): x = { 'exc_message': (), 'exc_type': 'object', 'exc_module': 'builtins' } with pytest.raises(SecurityError, match=re.escape(r"Expected an exception class, got builtins.object with payload ()")): self.b.exception_to_python(x) def test_exception_to_python_when_attribute_exception(self): b = BaseBackend(app=self.app) test_exception = {'exc_type': 'AttributeDoesNotExist', 'exc_module': 'celery', 'exc_message': ['Raise Custom Message']} result_exc = b.exception_to_python(test_exception) assert str(result_exc) == 'Raise Custom Message' def test_exception_to_python_when_type_error(self): b = BaseBackend(app=self.app) celery.TestParamException = paramexception test_exception = {'exc_type': 'TestParamException', 'exc_module': 'celery', 'exc_message': []} result_exc = b.exception_to_python(test_exception) del 
celery.TestParamException assert str(result_exc) == "<class 't.unit.backends.test_base.paramexception'>([])" def test_wait_for__on_interval(self): self.patching('time.sleep') b = BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {'status': states.PENDING} callback = Mock(name='callback') with pytest.raises(TimeoutError): b.wait_for(task_id='1', on_interval=callback, timeout=1) callback.assert_called_with() b._get_task_meta_for.return_value = {'status': states.SUCCESS} b.wait_for(task_id='1', timeout=None) def test_get_children(self): b = BaseBackend(app=self.app) b._get_task_meta_for = Mock() b._get_task_meta_for.return_value = {} assert b.get_children('id') is None b._get_task_meta_for.return_value = {'children': 3} assert b.get_children('id') == 3 @pytest.mark.parametrize( "message,original_exc,expected_cause_behavior", [ # With exception - should preserve original exception ( "Dependency failed", ValueError("original error"), "has_cause", ), # Without exception (None) - should not have __cause__ ( "Dependency failed", None, "no_cause", ), # With non-exception - should not have __cause__ ( "Dependency failed", "not an exception", "no_cause", ), ], ids=( "with_exception", "without_exception", "with_non_exception", ) ) def test_create_chord_error_with_cause( self, message, original_exc, expected_cause_behavior ): """Test _create_chord_error_with_cause with various parameter combinations.""" chord_error = _create_chord_error_with_cause(message, original_exc) # Verify basic ChordError properties assert isinstance(chord_error, ChordError) assert str(chord_error) == message # Verify __cause__ behavior based on test case if expected_cause_behavior == "has_cause": assert chord_error.__cause__ is original_exc elif expected_cause_behavior == "no_cause": assert not hasattr(chord_error, '__cause__') or chord_error.__cause__ is None @pytest.mark.parametrize( "task_id,errbacks,task_name,extra_kwargs,expected_attrs", [ # Basic 
parameters test ( "test-task-id", ["errback1", "errback2"], "test.task", {}, { "id": "test-task-id", "errbacks": ["errback1", "errback2"], "task": "test.task", "delivery_info": {}, }, ), # Default parameters test ( "test-task-id", None, None, {}, { "id": "test-task-id", "errbacks": [], "task": "unknown", "delivery_info": {}, }, ), # Extra parameters test ( "test-task-id", None, None, {"extra_param": "extra_value"}, { "id": "test-task-id", "errbacks": [], "task": "unknown", "delivery_info": {}, "extra_param": "extra_value", }, ), ], ids=( "basic_parameters", "default_parameters", "extra_parameters", ) ) def test_create_fake_task_request( self, task_id, errbacks, task_name, extra_kwargs, expected_attrs ): """Test _create_fake_task_request with various parameter combinations.""" # Build call arguments args = [task_id] if errbacks is not None: args.append(errbacks) if task_name is not None: args.append(task_name) fake_request = _create_fake_task_request(*args, **extra_kwargs) # Verify all expected attributes for attr_name, expected_value in expected_attrs.items(): assert getattr(fake_request, attr_name) == expected_value def _create_mock_callback(self, task_name="test.task", spec=None, **options): """Helper to create mock callbacks with common setup.""" from collections.abc import Mapping # Create a mock that properly implements the # mapping protocol for PyPy env compatibility class MockCallback(Mock, Mapping): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._mapping_data = {} def __getitem__(self, key): return self._mapping_data[key] def __iter__(self): return iter(self._mapping_data) def __len__(self): return len(self._mapping_data) def keys(self): return self._mapping_data.keys() def items(self): return self._mapping_data.items() callback = MockCallback(spec=spec) callback.task = task_name callback.options = {"link_error": [], **options} return callback def _setup_task_backend(self, task_name, backend=None): """Helper to set up task with 
backend in app registry.""" if backend is None: backend = Mock() backend.fail_from_current_stack = Mock(return_value="backend_result") self.app.tasks[task_name] = Mock() self.app.tasks[task_name].backend = backend return backend @pytest.mark.parametrize( "callback_type,task_name,expected_group_handler_called", [ ("group", "test.group.task", True), ("regular", "test.task", False), ], ids=["group_callback", "regular_callback"] ) def test_chord_error_from_stack_callback_dispatch(self, callback_type, task_name, expected_group_handler_called): """Test chord_error_from_stack dispatches to correct handler based on callback type.""" backend = self.b # Create callback based on type spec = group if callback_type == "group" else None callback = self._create_mock_callback(task_name, spec=spec) # Setup backend resolution mock_backend = self._setup_task_backend(task_name) # Mock handlers backend._handle_group_chord_error = Mock(return_value="group_result") backend._call_task_errbacks = Mock() exc = ValueError("test exception") result = backend.chord_error_from_stack(callback, exc) if expected_group_handler_called: backend._handle_group_chord_error.assert_called_once_with( group_callback=callback, backend=mock_backend, exc=exc ) assert result == "group_result" else: mock_backend.fail_from_current_stack.assert_called_once() def test_chord_error_from_stack_backend_fallback(self): """Test chord_error_from_stack falls back to self when task not found.""" backend = self.b callback = self._create_mock_callback("nonexistent.task") # Ensure task doesn't exist if "nonexistent.task" in self.app.tasks: del self.app.tasks["nonexistent.task"] backend._call_task_errbacks = Mock() backend.fail_from_current_stack = Mock(return_value="self_result") _ = backend.chord_error_from_stack(callback, ValueError("test")) # Verify self was used as fallback backend backend.fail_from_current_stack.assert_called_once() def _create_mock_frozen_group(self, group_id="group-id", task_ids=None, task_names=None): 
"""Helper to create mock frozen group with results.""" if task_ids is None: task_ids = ["task-id-1"] if task_names is None: task_names = ["test.task"] * len(task_ids) results = [] for task_id, task_name in zip(task_ids, task_names): result = Mock() result.id = task_id result.task = task_name results.append(result) frozen_group = Mock(spec=GroupResult) frozen_group.results = results frozen_group.id = group_id frozen_group.revoke = Mock() return frozen_group def _setup_group_chord_error_test(self, exc=None, errbacks=None, task_ids=None): """Common setup for group chord error tests.""" if exc is None: exc = ValueError("test error") if errbacks is None: errbacks = [] if task_ids is None: task_ids = ["task-id-1"] backend = Mock() backend._call_task_errbacks = Mock() backend.fail_from_current_stack = Mock() backend.mark_as_failure = Mock() group_callback = Mock(spec=group) group_callback.options = {"link_error": errbacks} frozen_group = self._create_mock_frozen_group(task_ids=task_ids) group_callback.freeze.return_value = frozen_group return self.b, backend, group_callback, frozen_group, exc @pytest.mark.parametrize( "exception_setup,expected_exc_used", [ ("with_cause", "original"), ("without_cause", "direct"), ], ids=["extracts_cause", "without_cause"] ) def test_handle_group_chord_error_exception_handling(self, exception_setup, expected_exc_used): """Test _handle_group_chord_error handles exceptions with and without __cause__.""" # Setup exceptions based on test case if exception_setup == "with_cause": original_exc = ValueError("original error") exc = ChordError("wrapped error") exc.__cause__ = original_exc expected_exc = original_exc else: exc = ValueError("direct error") expected_exc = exc b, backend, group_callback, frozen_group, _ = self._setup_group_chord_error_test(exc=exc) # Call the method _ = b._handle_group_chord_error(group_callback, backend, exc) # Verify correct exception was used backend.fail_from_current_stack.assert_called_with("task-id-1", 
exc=expected_exc) backend.mark_as_failure.assert_called_with("group-id", expected_exc) frozen_group.revoke.assert_called_once() def test_handle_group_chord_error_multiple_tasks(self): """Test _handle_group_chord_error handles multiple tasks in group.""" task_ids = ["task-id-1", "task-id-2"] b, backend, group_callback, frozen_group, exc = self._setup_group_chord_error_test(task_ids=task_ids) # Call the method b._handle_group_chord_error(group_callback, backend, exc) # Verify group revocation and all tasks handled frozen_group.revoke.assert_called_once() assert backend.fail_from_current_stack.call_count == 2 backend.fail_from_current_stack.assert_any_call("task-id-1", exc=exc) backend.fail_from_current_stack.assert_any_call("task-id-2", exc=exc) def test_handle_group_chord_error_with_errbacks(self): """Test _handle_group_chord_error calls error callbacks for each task.""" errbacks = ["errback1", "errback2"] b, backend, group_callback, frozen_group, exc = self._setup_group_chord_error_test(errbacks=errbacks) # Call the method b._handle_group_chord_error(group_callback, backend, exc) # Verify error callbacks were called backend._call_task_errbacks.assert_called_once() call_args = backend._call_task_errbacks.call_args fake_request = call_args[0][0] # Verify fake request was created correctly assert fake_request.id == "task-id-1" assert fake_request.errbacks == errbacks assert fake_request.task == "test.task" def test_handle_group_chord_error_cleanup_exception_handling(self): """Test _handle_group_chord_error handles cleanup exceptions gracefully.""" b = self.b backend = Mock() exc = ValueError("test error") # Mock group callback that raises exception during freeze group_callback = Mock(spec=group) group_callback.freeze.side_effect = RuntimeError("freeze failed") # Mock fallback behavior backend.fail_from_current_stack = Mock(return_value="fallback_result") # Should not raise exception, but return fallback result result = b._handle_group_chord_error(group_callback, 
backend, exc) # Verify fallback was called - the method returns an ExceptionInfo when cleanup fails # and falls back to single task handling assert result is not None # Method returns ExceptionInfo from fail_from_current_stack def test_handle_group_chord__exceptions_paths(self, caplog): """Test _handle_group_chord handles exceptions in various paths.""" backend = Mock() # Mock group callback group_callback = Mock(spec=group) group_callback.options = {"link_error": []} # Mock frozen group with multiple results mock_result1 = Mock() mock_result1.id = "task-id-1" mock_result2 = Mock() mock_result2.id = "task-id-2" frozen_group = Mock(spec=GroupResult) frozen_group.results = [mock_result1, mock_result2] frozen_group.revoke = Mock() group_callback.freeze.return_value = frozen_group # Test exception during fail_from_current_stack backend._call_task_errbacks.side_effect = RuntimeError("fail on _call_task_errbacks") backend.fail_from_current_stack.side_effect = RuntimeError("fail on fail_from_current_stack") _ = self.b._handle_group_chord_error(group_callback, backend, ValueError("test error")) assert "Failed to handle chord error for task" in caplog.text
test_BaseBackend_dict
python
allegroai__clearml
clearml/backend_api/services/v2_13/events.py
{ "start": 9581, "end": 14489 }
class ____(NonStrictDataModel): """ An image or video was dumped to storage for debugging :param timestamp: Epoch milliseconds UTC, will be set by the server if not set. :type timestamp: float :param task: Task ID (required) :type task: str :param iter: Iteration :type iter: int :param metric: Metric name, e.g. 'count', 'loss', 'accuracy' :type metric: str :param variant: E.g. 'class_1', 'total', 'average :type variant: str :param key: File key :type key: str :param url: File URL :type url: str """ _schema = { "description": "An image or video was dumped to storage for debugging", "properties": { "iter": {"description": "Iteration", "type": "integer"}, "key": {"description": "File key", "type": "string"}, "metric": { "description": "Metric name, e.g. 'count', 'loss', 'accuracy'", "type": "string", }, "task": {"description": "Task ID (required)", "type": "string"}, "timestamp": { "description": "Epoch milliseconds UTC, will be set by the server if not set.", "type": ["number", "null"], }, "type": {"const": "training_debug_image", "description": ""}, "url": {"description": "File URL", "type": "string"}, "variant": { "description": "E.g. 
'class_1', 'total', 'average", "type": "string", }, }, "required": ["task", "type"], "type": "object", } def __init__( self, task: str, timestamp: Optional[float] = None, iter: Optional[int] = None, metric: Optional[str] = None, variant: Optional[str] = None, key: Optional[str] = None, url: Optional[str] = None, **kwargs: Any ) -> None: super(MetricsImageEvent, self).__init__(**kwargs) self.timestamp = timestamp self.task = task self.iter = iter self.metric = metric self.variant = variant self.key = key self.url = url @schema_property("timestamp") def timestamp(self) -> Optional[float]: return self._property_timestamp @timestamp.setter def timestamp(self, value: Optional[float]) -> None: if value is None: self._property_timestamp = None return self.assert_isinstance(value, "timestamp", six.integer_types + (float,)) self._property_timestamp = value @schema_property("type") def type(self) -> Any: return "training_debug_image" @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("iter") def iter(self) -> Optional[int]: return self._property_iter @iter.setter def iter(self, value: Optional[int]) -> None: if value is None: self._property_iter = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "iter", six.integer_types) self._property_iter = value @schema_property("metric") def metric(self) -> Optional[str]: return self._property_metric @metric.setter def metric(self, value: Optional[str]) -> None: if value is None: self._property_metric = None return self.assert_isinstance(value, "metric", six.string_types) self._property_metric = value @schema_property("variant") def variant(self) -> Optional[str]: return self._property_variant @variant.setter def variant(self, value: Optional[str]) -> 
None: if value is None: self._property_variant = None return self.assert_isinstance(value, "variant", six.string_types) self._property_variant = value @schema_property("key") def key(self) -> Optional[str]: return self._property_key @key.setter def key(self, value: Optional[str]) -> None: if value is None: self._property_key = None return self.assert_isinstance(value, "key", six.string_types) self._property_key = value @schema_property("url") def url(self) -> Optional[str]: return self._property_url @url.setter def url(self, value: Optional[str]) -> None: if value is None: self._property_url = None return self.assert_isinstance(value, "url", six.string_types) self._property_url = value
MetricsImageEvent
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/sensor_tests/test_business_logic.py
{ "start": 6615, "end": 11202 }
class ____: """Test the sensor formatting functions.""" def _create_sample_sensor_list(self): """Create sample DgApiSensorList for testing.""" sensors = [ DgApiSensor( id="sensor1-id", name="daily_sensor", status=DgApiSensorStatus.RUNNING, sensor_type=DgApiSensorType.STANDARD, description="Daily processing sensor", repository_origin="main_location@main_repo", next_tick_timestamp=1705311000.0, # 2024-01-15T10:30:00Z ), DgApiSensor( id="sensor2-id", name="asset_sensor", status=DgApiSensorStatus.STOPPED, sensor_type=DgApiSensorType.ASSET, description="Asset change sensor", repository_origin="main_location@main_repo", next_tick_timestamp=None, ), DgApiSensor( id="sensor3-id", name="minimal_sensor", status=DgApiSensorStatus.PAUSED, sensor_type=DgApiSensorType.MULTI_ASSET, description=None, repository_origin=None, next_tick_timestamp=None, ), ] return DgApiSensorList(items=sensors, total=len(sensors)) def _create_sample_sensor(self): """Create sample DgApiSensor for testing.""" return DgApiSensor( id="single-sensor-id", name="critical_sensor", status=DgApiSensorStatus.RUNNING, sensor_type=DgApiSensorType.AUTO_MATERIALIZE, description="Critical production sensor", repository_origin="prod_location@prod_repo", next_tick_timestamp=1705311900.0, # 2024-01-15T10:45:00Z ) def test_format_sensors_text_output(self, snapshot): """Test formatting sensors as text.""" from dagster_shared.utils.timing import fixed_timezone sensor_list = self._create_sample_sensor_list() with fixed_timezone("UTC"): result = format_sensors(sensor_list, as_json=False) # Snapshot the entire text output snapshot.assert_match(result) def test_format_sensors_json_output(self, snapshot): """Test formatting sensors as JSON.""" sensor_list = self._create_sample_sensor_list() result = format_sensors(sensor_list, as_json=True) # For JSON, we want to snapshot the parsed structure to avoid formatting differences parsed = json.loads(result) snapshot.assert_match(parsed) def test_format_sensor_text_output(self, 
snapshot): """Test formatting single sensor as text.""" from dagster_shared.utils.timing import fixed_timezone sensor = self._create_sample_sensor() with fixed_timezone("UTC"): result = format_sensor(sensor, as_json=False) # Snapshot the text output snapshot.assert_match(result) def test_format_sensor_json_output(self, snapshot): """Test formatting single sensor as JSON.""" sensor = self._create_sample_sensor() result = format_sensor(sensor, as_json=True) # For JSON, we want to snapshot the parsed structure to avoid formatting differences parsed = json.loads(result) snapshot.assert_match(parsed) def test_format_minimal_sensor_text_output(self, snapshot): """Test formatting minimal sensor as text.""" from dagster_shared.utils.timing import fixed_timezone sensor = DgApiSensor( id="minimal-id", name="minimal_sensor", status=DgApiSensorStatus.PAUSED, sensor_type=DgApiSensorType.STANDARD, description=None, repository_origin=None, next_tick_timestamp=None, ) with fixed_timezone("UTC"): result = format_sensor(sensor, as_json=False) snapshot.assert_match(result) def test_format_minimal_sensor_json_output(self, snapshot): """Test formatting minimal sensor as JSON.""" sensor = DgApiSensor( id="minimal-id", name="minimal_sensor", status=DgApiSensorStatus.PAUSED, sensor_type=DgApiSensorType.STANDARD, description=None, repository_origin=None, next_tick_timestamp=None, ) result = format_sensor(sensor, as_json=True) parsed = json.loads(result) snapshot.assert_match(parsed)
TestFormatSensors
python
huggingface__transformers
src/transformers/models/dpr/modeling_dpr.py
{ "start": 2749, "end": 3619 }
class ____(ModelOutput): r""" start_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`): Logits of the start index of the span for each passage. end_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`): Logits of the end index of the span for each passage. relevance_logits (`torch.FloatTensor` of shape `(n_passages, )`): Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the question, compared to all the other passages. """ start_logits: torch.FloatTensor end_logits: Optional[torch.FloatTensor] = None relevance_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring
DPRReaderOutput
python
numpy__numpy
numpy/_core/tests/test_cpu_features.py
{ "start": 15412, "end": 15802 }
class ____(AbstractTest): features = ["RVV"] def load_flags(self): self.load_flags_auxv() if not self.features_flags: # Let the test fail and dump if we cannot read HWCAP. return hwcap = int(next(iter(self.features_flags)), 16) if hwcap & (1 << 21): # HWCAP_RISCV_V self.features_flags.add("RVV")
Test_RISCV_Features
python
huggingface__transformers
src/transformers/modeling_gguf_pytorch_utils.py
{ "start": 6737, "end": 7840 }
class ____(TensorProcessor): def __init__(self, config=None): super().__init__(config=config) def process(self, weights, name, **kwargs): # Original transpose implementation # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L2060-L2061 if ( "attn_qkv.weight" in name or "ffn_down.weight" in name or "ffn_up.weight" in name or "attn_output.weight" in name ): weights = weights.T # Handle special case for output.weight if name == "output.weight": # output.weight has conflicts with attn_output.weight in name checking # Store the tensor directly and signal to skip further processing name = "lm_head.weight" parsed_parameters = kwargs.get("parsed_parameters", {}) parsed_parameters["tensors"][name] = torch.from_numpy(np.copy(weights)) name = None # Signal to skip further processing return GGUFTensor(weights, name, {})
GPT2TensorProcessor
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mysql/mariadbconnector.py
{ "start": 10356, "end": 10607 }
class ____( MariaDBDialect, MySQLDialect_mariadbconnector ): supports_statement_cache = True _allows_uuid_binds = False dialect = MySQLDialect_mariadbconnector mariadb_dialect = MariaDBDialect_mariadbconnector
MariaDBDialect_mariadbconnector
python
pandas-dev__pandas
pandas/errors/__init__.py
{ "start": 20972, "end": 21945 }
class ____(Warning): """ Warning raised when trying to set using chained assignment. When the ``mode.copy_on_write`` option is enabled, chained assignment can never work. In such a situation, we are always setting into a temporary object that is the result of an indexing operation (getitem), which under Copy-on-Write always behaves as a copy. Thus, assigning through a chain can never update the original Series or DataFrame. For more information on Copy-on-Write, see :ref:`the user guide<copy_on_write>`. See Also -------- options.mode.copy_on_write : Global setting for enabling or disabling Copy-on-Write behavior. Examples -------- >>> pd.options.mode.copy_on_write = True >>> df = pd.DataFrame({"A": [1, 1, 1, 2, 2]}, columns=["A"]) >>> df["A"][0:3] = 10 # doctest: +SKIP ... # ChainedAssignmentError: ... >>> pd.options.mode.copy_on_write = False """
ChainedAssignmentError
python
google__jax
jax/_src/lax/slicing.py
{ "start": 10292, "end": 16726 }
class ____(enum.Enum): """ Describes how to handle out-of-bounds indices in a gather or scatter. Possible values are: CLIP: Indices will be clamped to the nearest in-range value, i.e., such that the entire window to be gathered is in-range. FILL_OR_DROP: If any part of a gathered window is out of bounds, the entire window that is returned, even those elements that were otherwise in-bounds, will be filled with a constant. If any part of a scattered window is out of bounds, the entire window will be discarded. PROMISE_IN_BOUNDS: The user promises that indices are in bounds. No additional checking will be performed. In practice, with the current XLA implementation this means that out-of-bounds gathers will be clamped but out-of-bounds scatters will be discarded. Gradients will not be correct if indices are out-of-bounds. """ CLIP = enum.auto() FILL_OR_DROP = enum.auto() PROMISE_IN_BOUNDS = enum.auto() ONE_HOT = enum.auto() @staticmethod def from_any(s: str | GatherScatterMode | None) -> GatherScatterMode: if isinstance(s, GatherScatterMode): return s if s == "clip": return GatherScatterMode.CLIP if s is None or s == "fill" or s == "drop": return GatherScatterMode.FILL_OR_DROP if s == "promise_in_bounds": return GatherScatterMode.PROMISE_IN_BOUNDS if s == "one_hot": return GatherScatterMode.ONE_HOT else: raise ValueError(f'Unknown gather mode "{s}"') def gather(operand: ArrayLike, start_indices: ArrayLike, dimension_numbers: GatherDimensionNumbers, slice_sizes: Shape, *, unique_indices: bool = False, indices_are_sorted: bool = False, mode: str | GatherScatterMode | None = None, fill_value = None) -> Array: """Gather operator. Wraps `XLA's Gather operator <https://www.openxla.org/xla/operation_semantics#gather>`_. :func:`gather` is a low-level operator with complicated semantics, and most JAX users will never need to call it directly. 
Instead, you should prefer using `Numpy-style indexing`_, and/or :func:`jax.numpy.ndarray.at`, perhaps in combination with :func:`jax.vmap`. Args: operand: an array from which slices should be taken start_indices: the indices at which slices should be taken dimension_numbers: a `lax.GatherDimensionNumbers` object that describes how dimensions of `operand`, `start_indices` and the output relate. slice_sizes: the size of each slice. Must be a sequence of non-negative integers with length equal to `ndim(operand)`. indices_are_sorted: whether `indices` is known to be sorted. If true, may improve performance on some backends. unique_indices: whether the elements gathered from ``operand`` are guaranteed not to overlap with each other. If ``True``, this may improve performance on some backends. JAX does not check this promise: if the elements overlap the behavior is undefined. mode: how to handle indices that are out of bounds: when set to ``'clip'``, indices are clamped so that the slice is within bounds, and when set to ``'fill'`` or ``'drop'`` gather returns a slice full of ``fill_value`` for the affected slice. The behavior for out-of-bounds indices when set to ``'promise_in_bounds'`` is implementation-defined. fill_value: the fill value to return for out-of-bounds slices when `mode` is ``'fill'``. Ignored otherwise. Defaults to ``NaN`` for inexact types, the largest negative value for signed types, the largest positive value for unsigned types, and ``True`` for booleans. Returns: An array containing the gather output. Examples: As mentioned above, you should basically never use :func:`gather` directly, and instead use NumPy-style indexing expressions to gather values from arrays. 
For example, here is how you can extract values at particular indices using straightforward indexing semantics, which will lower to XLA's Gather operator: >>> import jax.numpy as jnp >>> x = jnp.array([10, 11, 12]) >>> indices = jnp.array([0, 1, 1, 2, 2, 2]) >>> x[indices] Array([10, 11, 11, 12, 12, 12], dtype=int32) For control over settings like ``indices_are_sorted``, ``unique_indices``, ``mode``, and ``fill_value``, you can use the :attr:`jax.numpy.ndarray.at` syntax: >>> x.at[indices].get(indices_are_sorted=True, mode="promise_in_bounds") Array([10, 11, 11, 12, 12, 12], dtype=int32) By comparison, here is the equivalent function call using :func:`gather` directly, which is not something typical users should ever need to do: >>> from jax import lax >>> lax.gather(x, indices[:, None], slice_sizes=(1,), ... dimension_numbers=lax.GatherDimensionNumbers( ... offset_dims=(), ... collapsed_slice_dims=(0,), ... start_index_map=(0,)), ... indices_are_sorted=True, ... mode=lax.GatherScatterMode.PROMISE_IN_BOUNDS) Array([10, 11, 11, 12, 12, 12], dtype=int32) .. 
_Numpy-style indexing: https://numpy.org/doc/stable/reference/arrays.indexing.html """ if mode is None: mode = GatherScatterMode.PROMISE_IN_BOUNDS parsed_mode = GatherScatterMode.from_any(mode) if parsed_mode == GatherScatterMode.FILL_OR_DROP: if fill_value is None: dtype = _dtype(operand) if dtypes.issubdtype(dtype, np.inexact): fill_value = np.nan elif dtypes.issubdtype(dtype, np.signedinteger): fill_value = dtypes.iinfo(dtype).min elif dtypes.issubdtype(dtype, np.unsignedinteger): fill_value = dtypes.iinfo(dtype).max elif dtype == dtypes.bool_: fill_value = True elif dtypes.issubdtype(dtype, dtypes.prng_key): fill_value = np.iinfo('uint32').max else: raise ValueError(f"Unsupported dtype for gather fill_value {dtype}") else: fill_value = None operand, start_indices = core.standard_insert_pvary(operand, start_indices) return gather_p.bind( operand, start_indices, dimension_numbers=dimension_numbers, slice_sizes=core.canonicalize_shape(slice_sizes), unique_indices=bool(unique_indices), indices_are_sorted=bool(indices_are_sorted), mode=parsed_mode, fill_value=fill_value)
GatherScatterMode
python
python-excel__xlwt
xlwt/antlr.py
{ "start": 17225, "end": 17477 }
class ____(Exception): pass ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Token ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
TryAgain
python
gevent__gevent
src/greentest/3.10/test_wsgiref.py
{ "start": 3353, "end": 10385 }
class ____(TestCase): def check_hello(self, out, has_length=True): pyver = (python_implementation() + "/" + sys.version.split()[0]) self.assertEqual(out, ("HTTP/1.0 200 OK\r\n" "Server: WSGIServer/0.2 " + pyver +"\r\n" "Content-Type: text/plain\r\n" "Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" + (has_length and "Content-Length: 13\r\n" or "") + "\r\n" "Hello, world!").encode("iso-8859-1") ) def test_plain_hello(self): out, err = run_amock() self.check_hello(out) def test_environ(self): request = ( b"GET /p%61th/?query=test HTTP/1.0\n" b"X-Test-Header: Python test \n" b"X-Test-Header: Python test 2\n" b"Content-Length: 0\n\n" ) out, err = run_amock(header_app, request) self.assertEqual( out.splitlines()[-1], b"Python test,Python test 2;query=test;/path/" ) def test_request_length(self): out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n") self.assertEqual(out.splitlines()[0], b"HTTP/1.0 414 Request-URI Too Long") def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there self.check_hello(out, has_length=False) def test_simple_validation_error(self): def bad_app(environ,start_response): start_response("200 OK", ('Content-Type','text/plain')) return ["Hello, world!"] out, err = run_amock(validator(bad_app)) self.assertTrue(out.endswith( b"A server error occurred. Please contact the administrator." 
)) self.assertEqual( err.splitlines()[-2], "AssertionError: Headers (('Content-Type', 'text/plain')) must" " be of type list: <class 'tuple'>" ) def test_status_validation_errors(self): def create_bad_app(status): def bad_app(environ, start_response): start_response(status, [("Content-Type", "text/plain; charset=utf-8")]) return [b"Hello, world!"] return bad_app tests = [ ('200', 'AssertionError: Status must be at least 4 characters'), ('20X OK', 'AssertionError: Status message must begin w/3-digit code'), ('200OK', 'AssertionError: Status message must have a space after code'), ] for status, exc_message in tests: with self.subTest(status=status): out, err = run_amock(create_bad_app(status)) self.assertTrue(out.endswith( b"A server error occurred. Please contact the administrator." )) self.assertEqual(err.splitlines()[-2], exc_message) def test_wsgi_input(self): def bad_app(e,s): e["wsgi.input"].read() s("200 OK", [("Content-Type", "text/plain; charset=utf-8")]) return [b"data"] out, err = run_amock(validator(bad_app)) self.assertTrue(out.endswith( b"A server error occurred. Please contact the administrator." )) self.assertEqual( err.splitlines()[-2], "AssertionError" ) def test_bytes_validation(self): def app(e, s): s("200 OK", [ ("Content-Type", "text/plain; charset=utf-8"), ("Date", "Wed, 24 Dec 2008 13:29:32 GMT"), ]) return [b"data"] out, err = run_amock(validator(app)) self.assertTrue(err.endswith('"GET / HTTP/1.0" 200 4\n')) ver = sys.version.split()[0].encode('ascii') py = python_implementation().encode('ascii') pyver = py + b"/" + ver self.assertEqual( b"HTTP/1.0 200 OK\r\n" b"Server: WSGIServer/0.2 "+ pyver + b"\r\n" b"Content-Type: text/plain; charset=utf-8\r\n" b"Date: Wed, 24 Dec 2008 13:29:32 GMT\r\n" b"\r\n" b"data", out) def test_cp1252_url(self): def app(e, s): s("200 OK", [ ("Content-Type", "text/plain"), ("Date", "Wed, 24 Dec 2008 13:29:32 GMT"), ]) # PEP3333 says environ variables are decoded as latin1. 
# Encode as latin1 to get original bytes return [e["PATH_INFO"].encode("latin1")] out, err = run_amock( validator(app), data=b"GET /\x80%80 HTTP/1.0") self.assertEqual( [ b"HTTP/1.0 200 OK", mock.ANY, b"Content-Type: text/plain", b"Date: Wed, 24 Dec 2008 13:29:32 GMT", b"", b"/\x80\x80", ], out.splitlines()) def test_interrupted_write(self): # BaseHandler._write() and _flush() have to write all data, even if # it takes multiple send() calls. Test this by interrupting a send() # call with a Unix signal. pthread_kill = support.get_attribute(signal, "pthread_kill") def app(environ, start_response): start_response("200 OK", []) return [b'\0' * support.SOCK_MAX_SIZE] class WsgiHandler(NoLogRequestHandler, WSGIRequestHandler): pass server = make_server(socket_helper.HOST, 0, app, handler_class=WsgiHandler) self.addCleanup(server.server_close) interrupted = threading.Event() def signal_handler(signum, frame): interrupted.set() original = signal.signal(signal.SIGUSR1, signal_handler) self.addCleanup(signal.signal, signal.SIGUSR1, original) received = None main_thread = threading.get_ident() def run_client(): http = HTTPConnection(*server.server_address) http.request("GET", "/") with http.getresponse() as response: response.read(100) # The main thread should now be blocking in a send() system # call. But in theory, it could get interrupted by other # signals, and then retried. So keep sending the signal in a # loop, in case an earlier signal happens to be delivered at # an inconvenient moment. while True: pthread_kill(main_thread, signal.SIGUSR1) if interrupted.wait(timeout=float(1)): break nonlocal received received = len(response.read()) http.close() background = threading.Thread(target=run_client) background.start() server.handle_request() background.join() self.assertEqual(received, support.SOCK_MAX_SIZE - 100)
IntegrationTests
python
scikit-learn__scikit-learn
doc/sphinxext/sphinx_issues.py
{ "start": 2891, "end": 8117 }
class ____(object): EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$") def __init__( self, uri_config_option, format_kwarg, github_uri_template, format_text=None ): self.uri_config_option = uri_config_option self.format_kwarg = format_kwarg self.github_uri_template = github_uri_template self.format_text = format_text or self.default_format_text @staticmethod def default_format_text(issue_no): return "#{0}".format(issue_no) def make_node(self, name, issue_no, config, options=None): name_map = {"pr": "pull", "issue": "issues", "commit": "commit"} options = options or {} repo_match = self.EXTERNAL_REPO_REGEX.match(issue_no) if repo_match: # External repo username, repo, symbol, issue = repo_match.groups() if name not in name_map: raise ValueError( "External repo linking not supported for :{}:".format(name) ) path = name_map.get(name) ref = "https://github.com/{issues_github_path}/{path}/{n}".format( issues_github_path="{}/{}".format(username, repo), path=path, n=issue ) formatted_issue = self.format_text(issue).lstrip("#") text = "{username}/{repo}{symbol}{formatted_issue}".format(**locals()) link = nodes.reference(text=text, refuri=ref, **options) return link if issue_no not in ("-", "0"): uri_template = getattr(config, self.uri_config_option, None) if uri_template: ref = uri_template.format(**{self.format_kwarg: issue_no}) elif config.issues_github_path: ref = self.github_uri_template.format( issues_github_path=config.issues_github_path, n=issue_no ) else: raise ValueError( "Neither {} nor issues_github_path is set".format( self.uri_config_option ) ) issue_text = self.format_text(issue_no) link = nodes.reference(text=issue_text, refuri=ref, **options) else: link = None return link def __call__( self, name, rawtext, text, lineno, inliner, options=None, content=None ): options = options or {} content = content or [] issue_nos = [each.strip() for each in utils.unescape(text).split(",")] config = inliner.document.settings.env.app.config ret = [] for i, 
issue_no in enumerate(issue_nos): node = self.make_node(name, issue_no, config, options=options) ret.append(node) if i != len(issue_nos) - 1: sep = nodes.raw(text=", ", format="html") ret.append(sep) return ret, [] """Sphinx role for linking to an issue. Must have `issues_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :issue:`123` :issue:`42,45` :issue:`sloria/konch#123` """ issue_role = IssueRole( uri_config_option="issues_uri", format_kwarg="issue", github_uri_template="https://github.com/{issues_github_path}/issues/{n}", ) """Sphinx role for linking to a pull request. Must have `issues_pr_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :pr:`123` :pr:`42,45` :pr:`sloria/konch#43` """ pr_role = IssueRole( uri_config_option="issues_pr_uri", format_kwarg="pr", github_uri_template="https://github.com/{issues_github_path}/pull/{n}", ) def format_commit_text(sha): return sha[:7] """Sphinx role for linking to a commit. Must have `issues_pr_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :commit:`123abc456def` :commit:`sloria/konch@123abc456def` """ commit_role = IssueRole( uri_config_option="issues_commit_uri", format_kwarg="commit", github_uri_template="https://github.com/{issues_github_path}/commit/{n}", format_text=format_commit_text, ) def setup(app): # Format template for issues URI # e.g. 'https://github.com/sloria/marshmallow/issues/{issue} app.add_config_value("issues_uri", default=None, rebuild="html") # Format template for PR URI # e.g. 'https://github.com/sloria/marshmallow/pull/{issue} app.add_config_value("issues_pr_uri", default=None, rebuild="html") # Format template for commit URI # e.g. 'https://github.com/sloria/marshmallow/commits/{commit} app.add_config_value("issues_commit_uri", default=None, rebuild="html") # Shortcut for Github, e.g. 'sloria/marshmallow' app.add_config_value("issues_github_path", default=None, rebuild="html") # Format template for user profile URI # e.g. 
'https://github.com/{user}' app.add_config_value("issues_user_uri", default=None, rebuild="html") app.add_role("issue", issue_role) app.add_role("pr", pr_role) app.add_role("user", user_role) app.add_role("commit", commit_role) app.add_role("cve", cve_role) return { "version": __version__, "parallel_read_safe": True, "parallel_write_safe": True, }
IssueRole
python
great-expectations__great_expectations
great_expectations/data_context/types/base.py
{ "start": 2150, "end": 5855 }
class ____(SerializableDictDot): _config_schema_class: ClassVar[Optional[Type[Schema]]] = None exclude_field_names: ClassVar[Set[str]] = { "commented_map", } def __init__(self, commented_map: Optional[CommentedMap] = None) -> None: if commented_map is None: commented_map = CommentedMap() self._commented_map = commented_map @classmethod def _get_schema_instance(cls: Type[BYC]) -> Schema: if not issubclass(cls.get_schema_class(), Schema): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP "Invalid type: A configuration schema class needs to inherit from the Marshmallow Schema class." # noqa: E501 # FIXME CoP ) if not issubclass(cls.get_config_class(), BaseYamlConfig): raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP "Invalid type: A configuration class needs to inherit from the BaseYamlConfig class." # noqa: E501 # FIXME CoP ) if hasattr(cls.get_config_class(), "_schema_instance"): # noinspection PyProtectedMember schema_instance: Optional[Schema] = cls.get_config_class()._schema_instance if schema_instance is None: cls.get_config_class()._schema_instance = (cls.get_schema_class())() return cls.get_config_class().schema_instance else: return schema_instance else: cls.get_config_class().schema_instance = (cls.get_schema_class())() return cls.get_config_class().schema_instance @classmethod def from_commented_map(cls: Type[BYC], commented_map: Union[CommentedMap, Dict]) -> BYC: try: schema_instance: Schema = cls._get_schema_instance() config: Union[dict, BYC] = schema_instance.load(commented_map) if isinstance(config, dict): return cls.get_config_class()(commented_map=commented_map, **config) return config except ValidationError: logger.error( # noqa: TRY400 # FIXME CoP "Encountered errors during loading config. See ValidationError for more details." 
) raise def _get_schema_validated_updated_commented_map(self) -> CommentedMap: commented_map: CommentedMap = copy.deepcopy(self._commented_map) schema_validated_map: dict = self._get_schema_instance().dump(self) commented_map.update(schema_validated_map) return commented_map def to_yaml(self, outfile: Union[str, pathlib.Path, TextIOWrapper]) -> None: """ :returns None (but writes a YAML file containing the project configuration) """ yaml.dump(self.commented_map, outfile) def to_yaml_str(self) -> str: """ :returns a YAML string containing the project configuration """ return object_to_yaml_str(obj=self.commented_map) @override def to_json_dict(self) -> dict[str, JSONValues]: """Returns a JSON-serializable dict containing this DataContextConfig. Returns: A JSON-serializable dict representation of this project configuration. """ commented_map: CommentedMap = self.commented_map return convert_to_json_serializable(data=commented_map) @property def commented_map(self) -> CommentedMap: return self._get_schema_validated_updated_commented_map() @classmethod def get_config_class(cls: Type) -> Type: raise NotImplementedError @classmethod def get_schema_class(cls) -> Type[Schema]: raise NotImplementedError
BaseYamlConfig
python
facebook__pyre-check
client/json_rpc.py
{ "start": 884, "end": 1059 }
class ____(JSONRPCException): """ An error occurred on the server while parsing the JSON text. """ def error_code(self) -> int: return -32700
ParseError
python
allegroai__clearml
clearml/backend_api/services/v2_20/events.py
{ "start": 144548, "end": 149870 }
class ____(Request): """ Get plot events for the requested amount of iterations per each task :param metrics: List of metrics and variants :type metrics: Sequence[TaskMetricVariants] :param iters: Max number of latest iterations for which to return debug images :type iters: int :param navigate_earlier: If set then events are retreived from latest iterations to earliest ones. Otherwise from earliest iterations to the latest. The default is True :type navigate_earlier: bool :param refresh: If set then scroll will be moved to the latest iterations. The default is False :type refresh: bool :param scroll_id: Scroll ID of previous call (used for getting more results) :type scroll_id: str """ _service = "events" _action = "plots" _version = "2.20" _schema = { "definitions": { "task_metric_variants": { "properties": { "metric": {"description": "Metric name", "type": "string"}, "task": {"description": "Task ID", "type": "string"}, "variants": { "description": "Metric variant names", "items": {"type": "string"}, "type": "array", }, }, "required": ["task"], "type": "object", } }, "properties": { "iters": { "description": "Max number of latest iterations for which to return debug images", "type": "integer", }, "metrics": { "description": "List of metrics and variants", "items": {"$ref": "#/definitions/task_metric_variants"}, "type": "array", }, "navigate_earlier": { "description": "If set then events are retreived from latest iterations to earliest ones. Otherwise from earliest iterations to the latest. The default is True", "type": "boolean", }, "refresh": { "description": "If set then scroll will be moved to the latest iterations. 
The default is False", "type": "boolean", }, "scroll_id": { "description": "Scroll ID of previous call (used for getting more results)", "type": "string", }, }, "required": ["metrics"], "type": "object", } def __init__( self, metrics: List[Any], iters: Optional[int] = None, navigate_earlier: Optional[bool] = None, refresh: Optional[bool] = None, scroll_id: Optional[str] = None, **kwargs: Any ) -> None: super(PlotsRequest, self).__init__(**kwargs) self.metrics = metrics self.iters = iters self.navigate_earlier = navigate_earlier self.refresh = refresh self.scroll_id = scroll_id @schema_property("metrics") def metrics(self) -> List[Any]: return self._property_metrics @metrics.setter def metrics(self, value: List[Any]) -> None: if value is None: self._property_metrics = None return self.assert_isinstance(value, "metrics", (list, tuple)) if any((isinstance(v, dict) for v in value)): value = [TaskMetricVariants.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "metrics", TaskMetricVariants, is_array=True) self._property_metrics = value @schema_property("iters") def iters(self) -> Optional[int]: return self._property_iters @iters.setter def iters(self, value: Optional[int]) -> None: if value is None: self._property_iters = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "iters", six.integer_types) self._property_iters = value @schema_property("navigate_earlier") def navigate_earlier(self) -> Optional[bool]: return self._property_navigate_earlier @navigate_earlier.setter def navigate_earlier(self, value: Optional[bool]) -> None: if value is None: self._property_navigate_earlier = None return self.assert_isinstance(value, "navigate_earlier", (bool,)) self._property_navigate_earlier = value @schema_property("refresh") def refresh(self) -> Optional[bool]: return self._property_refresh @refresh.setter def refresh(self, value: Optional[bool]) -> None: if value is None: 
self._property_refresh = None return self.assert_isinstance(value, "refresh", (bool,)) self._property_refresh = value @schema_property("scroll_id") def scroll_id(self) -> Optional[str]: return self._property_scroll_id @scroll_id.setter def scroll_id(self, value: Optional[str]) -> None: if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value
PlotsRequest
python
apache__airflow
airflow-core/tests/unit/utils/test_helpers.py
{ "start": 1614, "end": 9240 }
class ____: @pytest.mark.db_test @pytest.mark.usefixtures("clear_db") def test_render_log_filename(self, create_task_instance): try_number = 1 dag_id = "test_render_log_filename_dag" task_id = "test_render_log_filename_task" logical_date = timezone.datetime(2016, 1, 1) ti = create_task_instance(dag_id=dag_id, task_id=task_id, logical_date=logical_date) filename_template = "{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log" ts = ti.get_template_context()["ts"] expected_filename = f"{dag_id}/{task_id}/{ts}/{try_number}.log" rendered_filename = helpers.render_log_filename(ti, try_number, filename_template) assert rendered_filename == expected_filename def test_chunks(self): with pytest.raises(ValueError, match=CHUNK_SIZE_POSITIVE_INT): list(helpers.chunks([1, 2, 3], 0)) with pytest.raises(ValueError, match=CHUNK_SIZE_POSITIVE_INT): list(helpers.chunks([1, 2, 3], -3)) assert list(helpers.chunks([], 5)) == [] assert list(helpers.chunks([1], 1)) == [[1]] assert list(helpers.chunks([1, 2, 3], 2)) == [[1, 2], [3]] def test_is_container(self): assert not helpers.is_container("a string is not a container") assert helpers.is_container(["a", "list", "is", "a", "container"]) assert helpers.is_container(["test_list"]) assert not helpers.is_container("test_str_not_iterable") # Pass an object that is not iter nor a string. 
assert not helpers.is_container(10) def test_convert_camel_to_snake(self): assert helpers.convert_camel_to_snake("LocalTaskJob") == "local_task_job" assert helpers.convert_camel_to_snake("somethingVeryRandom") == "something_very_random" def test_merge_dicts(self): """ Test _merge method from JSONFormatter """ dict1 = {"a": 1, "b": 2, "c": 3} dict2 = {"a": 1, "b": 3, "d": 42} merged = merge_dicts(dict1, dict2) assert merged == {"a": 1, "b": 3, "c": 3, "d": 42} def test_merge_dicts_recursive_overlap_l1(self): """ Test merge_dicts with recursive dict; one level of nesting """ dict1 = {"a": 1, "r": {"a": 1, "b": 2}} dict2 = {"a": 1, "r": {"c": 3, "b": 0}} merged = merge_dicts(dict1, dict2) assert merged == {"a": 1, "r": {"a": 1, "b": 0, "c": 3}} def test_merge_dicts_recursive_overlap_l2(self): """ Test merge_dicts with recursive dict; two levels of nesting """ dict1 = {"a": 1, "r": {"a": 1, "b": {"a": 1}}} dict2 = {"a": 1, "r": {"c": 3, "b": {"b": 1}}} merged = merge_dicts(dict1, dict2) assert merged == {"a": 1, "r": {"a": 1, "b": {"a": 1, "b": 1}, "c": 3}} def test_merge_dicts_recursive_right_only(self): """ Test merge_dicts with recursive when dict1 doesn't have any nested dict """ dict1 = {"a": 1} dict2 = {"a": 1, "r": {"c": 3, "b": 0}} merged = merge_dicts(dict1, dict2) assert merged == {"a": 1, "r": {"b": 0, "c": 3}} def test_build_airflow_dagrun_url(self): expected_url = "/dags/somedag/runs/abc123" assert build_airflow_dagrun_url(dag_id="somedag", run_id="abc123") == expected_url @pytest.mark.parametrize( ("key_id", "message", "exception"), [ (3, "The key has to be a string and is <class 'int'>:3", TypeError), (None, "The key has to be a string and is <class 'NoneType'>:None", TypeError), ("simple_key", None, None), ("simple-key", None, None), ("group.simple_key", None, None), ("root.group.simple-key", None, None), ( "key with space", "The key 'key with space' has to be made of alphanumeric " "characters, dashes, dots and underscores exclusively", 
AirflowException, ), ( "key_with_!", "The key 'key_with_!' has to be made of alphanumeric " "characters, dashes, dots and underscores exclusively", AirflowException, ), (" " * 251, f"The key: {' ' * 251} has to be less than 250 characters", AirflowException), ], ) def test_validate_key(self, key_id, message, exception): if message: with pytest.raises(exception, match=re.escape(message)): validate_key(key_id) else: validate_key(key_id) def test_exactly_one(self): """ Checks that when we set ``true_count`` elements to "truthy", and others to "falsy", we get the expected return. We check for both True / False, and truthy / falsy values 'a' and '', and verify that they can safely be used in any combination. """ def assert_exactly_one(true=0, truthy=0, false=0, falsy=0): sample = [] for truth_value, num in [(True, true), (False, false), ("a", truthy), ("", falsy)]: if num: sample.extend([truth_value] * num) if sample: expected = true + truthy == 1 assert exactly_one(*sample) is expected for row in itertools.product(range(4), repeat=4): assert_exactly_one(*row) def test_exactly_one_should_fail(self): with pytest.raises(ValueError, match="Not supported for iterable args"): exactly_one([True, False]) def test_at_most_one(self): """ Checks that when we set ``true_count`` elements to "truthy", and others to "falsy", we get the expected return. We check for both True / False, and truthy / falsy values 'a' and '', and verify that they can safely be used in any combination. NOTSET values should be ignored. 
""" def assert_at_most_one(true=0, truthy=0, false=0, falsy=0, notset=0): sample = [] for truth_value, num in [ (True, true), (False, false), ("a", truthy), ("", falsy), (NOTSET, notset), ]: if num: sample.extend([truth_value] * num) if sample: expected = true + truthy in (0, 1) assert at_most_one(*sample) is expected for row in itertools.product(range(4), repeat=4): print(row) assert_at_most_one(*row) @pytest.mark.parametrize( ("mode", "expected"), [ ( "strict", { "b": "", "c": {"b": "", "c": "hi", "d": ["", 0, "1"]}, "d": ["", 0, "1"], "e": ["", 0, {"b": "", "c": "hi", "d": ["", 0, "1"]}, ["", 0, "1"], [""]], "f": {}, "g": [""], }, ), ( "truthy", { "c": {"c": "hi", "d": ["1"]}, "d": ["1"], "e": [{"c": "hi", "d": ["1"]}, ["1"]], }, ), ], ) def test_prune_dict(self, mode, expected): l1 = ["", 0, "1", None] d1 = {"a": None, "b": "", "c": "hi", "d": l1} d2 = {"a": None, "b": "", "c": d1, "d": l1, "e": [None, "", 0, d1, l1, [""]], "f": {}, "g": [""]} assert prune_dict(d2, mode=mode) == expected
TestHelpers
python
ray-project__ray
python/ray/train/v2/_internal/execution/controller/state.py
{ "start": 5546, "end": 5682 }
class ____(TrainControllerState): def __init__(self): super().__init__(state_type=TrainControllerStateType.ABORTED)
AbortedState
python
ray-project__ray
python/ray/train/xgboost/config.py
{ "start": 1859, "end": 4334 }
class ____(Backend): def __init__(self): self._tracker: Optional[RabitTracker] = None self._wait_thread: Optional[threading.Thread] = None def _setup_xgboost_distributed_backend(self, worker_group: BaseWorkerGroup): # Set up the rabit tracker on the Train driver. num_workers = len(worker_group) rabit_args = {"n_workers": num_workers} train_driver_ip = ray.util.get_node_ip_address() # NOTE: sortby="task" is needed to ensure that the xgboost worker ranks # align with Ray Train worker ranks. # The worker ranks will be sorted by `dmlc_task_id`, # which is defined below. self._tracker = RabitTracker( n_workers=num_workers, host_ip=train_driver_ip, sortby="task" ) self._tracker.start() # The RabitTracker is started in a separate thread, and the # `wait_for` method must be called for `worker_args` to return. self._wait_thread = threading.Thread(target=self._tracker.wait_for, daemon=True) self._wait_thread.start() rabit_args.update(self._tracker.worker_args()) start_log = ( "RabitTracker coordinator started with parameters:\n" f"{json.dumps(rabit_args, indent=2)}" ) logger.debug(start_log) def set_xgboost_communicator_args(args): import ray.train args["dmlc_task_id"] = ( f"[xgboost.ray-rank={ray.train.get_context().get_world_rank():08}]:" f"{ray.get_runtime_context().get_actor_id()}" ) _set_xgboost_args(args) worker_group.execute(set_xgboost_communicator_args, rabit_args) def on_training_start( self, worker_group: BaseWorkerGroup, backend_config: XGBoostConfig ): assert backend_config.xgboost_communicator == "rabit" self._setup_xgboost_distributed_backend(worker_group) def on_shutdown(self, worker_group: BaseWorkerGroup, backend_config: XGBoostConfig): timeout = 5 if self._wait_thread is not None: self._wait_thread.join(timeout=timeout) if self._wait_thread.is_alive(): logger.warning( "During shutdown, the RabitTracker thread failed to join " f"within {timeout} seconds. " "The process will still be terminated as part of Ray actor cleanup." )
_XGBoostRabitBackend
python
doocs__leetcode
solution/2300-2399/2389.Longest Subsequence With Limited Sum/Solution2.py
{ "start": 0, "end": 416 }
class ____: def answerQueries(self, nums: List[int], queries: List[int]) -> List[int]: nums.sort() m = len(queries) ans = [0] * m idx = sorted(range(m), key=lambda i: queries[i]) s = j = 0 for i in idx: while j < len(nums) and s + nums[j] <= queries[i]: s += nums[j] j += 1 ans[i] = j return ans
Solution
python
pyinstaller__pyinstaller
bootloader/waflib/ConfigSet.py
{ "start": 232, "end": 5179 }
class ____(object): __slots__ = ('table', 'parent') def __init__(self, filename=None): self.table = {} if filename: self.load(filename) def __contains__(self, key): if key in self.table: return True try: return self.parent.__contains__(key) except AttributeError: return False def keys(self): keys = set() cur = self while cur: keys.update(cur.table.keys()) cur = getattr(cur, 'parent', None) keys = list(keys) keys.sort() return keys def __iter__(self): return iter(self.keys()) def __str__(self): return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()]) def __getitem__(self, key): try: while 1: x = self.table.get(key) if not x is None: return x self = self.parent except AttributeError: return [] def __setitem__(self, key, value): self.table[key] = value def __delitem__(self, key): self[key] = [] def __getattr__(self, name): if name in self.__slots__: return object.__getattribute__(self, name) else: return self[name] def __setattr__(self, name, value): if name in self.__slots__: object.__setattr__(self, name, value) else: self[name] = value def __delattr__(self, name): if name in self.__slots__: object.__delattr__(self, name) else: del self[name] def derive(self): newenv = ConfigSet() newenv.parent = self return newenv def detach(self): tbl = self.get_merged_dict() try: delattr(self, 'parent') except AttributeError: pass else: keys = tbl.keys() for x in keys: tbl[x] = copy.deepcopy(tbl[x]) self.table = tbl return self def get_flat(self, key): s = self[key] if isinstance(s, str): return s return ' '.join(s) def _get_list_value_for_modification(self, key): try: value = self.table[key] except KeyError: try: value = self.parent[key] except AttributeError: value = [] else: if isinstance(value, list): value = value[:] else: value = [value] self.table[key] = value else: if not isinstance(value, list): self.table[key] = value = [value] return value def append_value(self, var, val): if isinstance(val, str): val = [val] current_value = 
self._get_list_value_for_modification(var) current_value.extend(val) def prepend_value(self, var, val): if isinstance(val, str): val = [val] self.table[var] = val + self._get_list_value_for_modification(var) def append_unique(self, var, val): if isinstance(val, str): val = [val] current_value = self._get_list_value_for_modification(var) for x in val: if x not in current_value: current_value.append(x) def get_merged_dict(self): table_list = [] env = self while 1: table_list.insert(0, env.table) try: env = env.parent except AttributeError: break merged_table = {} for table in table_list: merged_table.update(table) return merged_table def store(self, filename): try: os.makedirs(os.path.split(filename)[0]) except OSError: pass buf = [] merged_table = self.get_merged_dict() keys = list(merged_table.keys()) keys.sort() try: fun = ascii except NameError: fun = repr for k in keys: if k != 'undo_stack': buf.append('%s = %s\n' % (k, fun(merged_table[k]))) Utils.writef(filename, ''.join(buf)) def load(self, filename): tbl = self.table code = Utils.readf(filename, m='r') for m in re_imp.finditer(code): g = m.group tbl[g(2)] = eval(g(3)) Logs.debug('env: %s', self.table) def update(self, d): self.table.update(d) def stash(self): orig = self.table tbl = self.table = self.table.copy() for x in tbl.keys(): tbl[x] = copy.deepcopy(tbl[x]) self.undo_stack = self.undo_stack + [orig] def commit(self): self.undo_stack.pop(-1) def revert(self): self.table = self.undo_stack.pop(-1)
ConfigSet
python
openai__openai-python
src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py
{ "start": 268, "end": 412 }
class ____(TypedDict, total=False): enabled: bool """Enable automatic thread title generation. Defaults to true."""
AutomaticThreadTitling
python
numpy__numpy
numpy/_core/tests/test_shape_base.py
{ "start": 21354, "end": 31867 }
class ____: @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) def block(self, request): # blocking small arrays and large arrays go through different paths. # the algorithm is triggered depending on the number of element # copies required. # We define a test fixture that forces most tests to go through # both code paths. # Ultimately, this should be removed if a single algorithm is found # to be faster for both small and large arrays. def _block_force_concatenate(arrays): arrays, list_ndim, result_ndim, _ = _block_setup(arrays) return _block_concatenate(arrays, list_ndim, result_ndim) def _block_force_slicing(arrays): arrays, list_ndim, result_ndim, _ = _block_setup(arrays) return _block_slicing(arrays, list_ndim, result_ndim) if request.param == 'force_concatenate': return _block_force_concatenate elif request.param == 'force_slicing': return _block_force_slicing elif request.param == 'block': return block else: raise ValueError('Unknown blocking request. There is a typo in the tests.') def test_returns_copy(self, block): a = np.eye(3) b = block(a) b[0, 0] = 2 assert b[0, 0] != a[0, 0] def test_block_total_size_estimate(self, block): _, _, _, total_size = _block_setup([1]) assert total_size == 1 _, _, _, total_size = _block_setup([[1]]) assert total_size == 1 _, _, _, total_size = _block_setup([[1, 1]]) assert total_size == 2 _, _, _, total_size = _block_setup([[1], [1]]) assert total_size == 2 _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) assert total_size == 4 def test_block_simple_row_wise(self, block): a_2d = np.ones((2, 2)) b_2d = 2 * a_2d desired = np.array([[1, 1, 2, 2], [1, 1, 2, 2]]) result = block([a_2d, b_2d]) assert_equal(desired, result) def test_block_simple_column_wise(self, block): a_2d = np.ones((2, 2)) b_2d = 2 * a_2d expected = np.array([[1, 1], [1, 1], [2, 2], [2, 2]]) result = block([[a_2d], [b_2d]]) assert_equal(expected, result) def test_block_with_1d_arrays_row_wise(self, block): # # # 1-D vectors are treated as 
row arrays a = np.array([1, 2, 3]) b = np.array([2, 3, 4]) expected = np.array([1, 2, 3, 2, 3, 4]) result = block([a, b]) assert_equal(expected, result) def test_block_with_1d_arrays_multiple_rows(self, block): a = np.array([1, 2, 3]) b = np.array([2, 3, 4]) expected = np.array([[1, 2, 3, 2, 3, 4], [1, 2, 3, 2, 3, 4]]) result = block([[a, b], [a, b]]) assert_equal(expected, result) def test_block_with_1d_arrays_column_wise(self, block): # # # 1-D vectors are treated as row arrays a_1d = np.array([1, 2, 3]) b_1d = np.array([2, 3, 4]) expected = np.array([[1, 2, 3], [2, 3, 4]]) result = block([[a_1d], [b_1d]]) assert_equal(expected, result) def test_block_mixed_1d_and_2d(self, block): a_2d = np.ones((2, 2)) b_1d = np.array([2, 2]) result = block([[a_2d], [b_1d]]) expected = np.array([[1, 1], [1, 1], [2, 2]]) assert_equal(expected, result) def test_block_complicated(self, block): # a bit more complicated one_2d = np.array([[1, 1, 1]]) two_2d = np.array([[2, 2, 2]]) three_2d = np.array([[3, 3, 3, 3, 3, 3]]) four_1d = np.array([4, 4, 4, 4, 4, 4]) five_0d = np.array(5) six_1d = np.array([6, 6, 6, 6, 6]) zero_2d = np.zeros((2, 6)) expected = np.array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 3, 3, 3], [4, 4, 4, 4, 4, 4], [5, 6, 6, 6, 6, 6], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) result = block([[one_2d, two_2d], [three_2d], [four_1d], [five_0d, six_1d], [zero_2d]]) assert_equal(result, expected) def test_nested(self, block): one = np.array([1, 1, 1]) two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) three = np.array([3, 3, 3]) four = np.array([4, 4, 4]) five = np.array(5) six = np.array([6, 6, 6, 6, 6]) zero = np.zeros((2, 6)) result = block([ [ block([ [one], [three], [four] ]), two ], [five, six], [zero] ]) expected = np.array([[1, 1, 1, 2, 2, 2], [3, 3, 3, 2, 2, 2], [4, 4, 4, 2, 2, 2], [5, 6, 6, 6, 6, 6], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) assert_equal(result, expected) def test_3d(self, block): a000 = np.ones((2, 2, 2), int) * 1 a100 = np.ones((3, 2, 2), int) * 2 a010 = 
np.ones((2, 3, 2), int) * 3 a001 = np.ones((2, 2, 3), int) * 4 a011 = np.ones((2, 3, 3), int) * 5 a101 = np.ones((3, 2, 3), int) * 6 a110 = np.ones((3, 3, 2), int) * 7 a111 = np.ones((3, 3, 3), int) * 8 result = block([ [ [a000, a001], [a010, a011], ], [ [a100, a101], [a110, a111], ] ]) expected = array([[[1, 1, 4, 4, 4], [1, 1, 4, 4, 4], [3, 3, 5, 5, 5], [3, 3, 5, 5, 5], [3, 3, 5, 5, 5]], [[1, 1, 4, 4, 4], [1, 1, 4, 4, 4], [3, 3, 5, 5, 5], [3, 3, 5, 5, 5], [3, 3, 5, 5, 5]], [[2, 2, 6, 6, 6], [2, 2, 6, 6, 6], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8]], [[2, 2, 6, 6, 6], [2, 2, 6, 6, 6], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8]], [[2, 2, 6, 6, 6], [2, 2, 6, 6, 6], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8], [7, 7, 8, 8, 8]]]) assert_array_equal(result, expected) def test_block_with_mismatched_shape(self, block): a = np.array([0, 0]) b = np.eye(2) assert_raises(ValueError, block, [a, b]) assert_raises(ValueError, block, [b, a]) to_block = [[np.ones((2, 3)), np.ones((2, 2))], [np.ones((2, 2)), np.ones((2, 2))]] assert_raises(ValueError, block, to_block) def test_no_lists(self, block): assert_equal(block(1), np.array(1)) assert_equal(block(np.eye(3)), np.eye(3)) def test_invalid_nesting(self, block): msg = 'depths are mismatched' assert_raises_regex(ValueError, msg, block, [1, [2]]) assert_raises_regex(ValueError, msg, block, [1, []]) assert_raises_regex(ValueError, msg, block, [[1], 2]) assert_raises_regex(ValueError, msg, block, [[], 2]) assert_raises_regex(ValueError, msg, block, [ [[1], [2]], [[3, 4]], [5] # missing brackets ]) def test_empty_lists(self, block): assert_raises_regex(ValueError, 'empty', block, []) assert_raises_regex(ValueError, 'empty', block, [[]]) assert_raises_regex(ValueError, 'empty', block, [[1], []]) def test_tuple(self, block): assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) def test_different_ndims(self, block): a = 1. 
b = 2 * np.ones((1, 2)) c = 3 * np.ones((1, 1, 3)) result = block([a, b, c]) expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) assert_equal(result, expected) def test_different_ndims_depths(self, block): a = 1. b = 2 * np.ones((1, 2)) c = 3 * np.ones((1, 2, 3)) result = block([[a, b], [c]]) expected = np.array([[[1., 2., 2.], [3., 3., 3.], [3., 3., 3.]]]) assert_equal(result, expected) def test_block_memory_order(self, block): # 3D arr_c = np.zeros((3,) * 3, order='C') arr_f = np.zeros((3,) * 3, order='F') b_c = [[[arr_c, arr_c], [arr_c, arr_c]], [[arr_c, arr_c], [arr_c, arr_c]]] b_f = [[[arr_f, arr_f], [arr_f, arr_f]], [[arr_f, arr_f], [arr_f, arr_f]]] assert block(b_c).flags['C_CONTIGUOUS'] assert block(b_f).flags['F_CONTIGUOUS'] arr_c = np.zeros((3, 3), order='C') arr_f = np.zeros((3, 3), order='F') # 2D b_c = [[arr_c, arr_c], [arr_c, arr_c]] b_f = [[arr_f, arr_f], [arr_f, arr_f]] assert block(b_c).flags['C_CONTIGUOUS'] assert block(b_f).flags['F_CONTIGUOUS'] def test_block_dispatcher(): class ArrayLike: pass a = ArrayLike() b = ArrayLike() c = ArrayLike() assert_equal(list(_block_dispatcher(a)), [a]) assert_equal(list(_block_dispatcher([a])), [a]) assert_equal(list(_block_dispatcher([a, b])), [a, b]) assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c]) # don't recurse into non-lists assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
TestBlock
python
huggingface__transformers
src/transformers/models/idefics2/modeling_idefics2.py
{ "start": 22255, "end": 26241 }
class ____(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None) -> None: """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`""" super().__init__() self.config = config self.layer_idx = None self.hidden_size = config.hidden_size self.num_heads = config.resampler_n_heads self.head_dim = config.resampler_head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.attention_dropout = config.attention_dropout self.scaling = self.head_dim**-0.5 self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.is_causal = False def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! Args: latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to. context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample. attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask. position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token. past_key_values (`Cache`, *optional*): Tuple of tensors containing cached key and value states. 
output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights. use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_values for caching. """ bsz, q_len, _ = latents.size() kv_seq_len = q_len + context.size()[1] hidden_states = torch.concat([context, latents], dim=-2) queries = self.q_proj(latents) keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) queries = queries.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) keys = keys.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) values = values.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) past_key_values = getattr(self, "past_key_values", past_key_values) if past_key_values is not None: keys, values = past_key_values.update(keys, values, self.layer_idx) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) return attn_output, attn_weights
Idefics2PerceiverAttention
python
streamlit__streamlit
lib/tests/streamlit/delta_generator_test.py
{ "start": 1988, "end": 4163 }
class ____(unittest.TestCase): @patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=False)) def test_run_warning_presence(self): """Using Streamlit without `streamlit run` produces a warning.""" with self.assertLogs("streamlit", level=logging.WARNING) as logs: delta_generator._use_warning_has_been_displayed = False st.write("Using delta generator") output = "".join(logs.output) # Warning produced exactly once assert len(re.findall(r"streamlit run", output)) == 1 @patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True)) def test_run_warning_absence(self): """Using Streamlit through the CLI results in a Runtime being instantiated, so it produces no usage warning.""" with self.assertLogs("streamlit", level=logging.WARNING) as logs: delta_generator._use_warning_has_been_displayed = False st.write("Using delta generator") # assertLogs is being used as a context manager, but it also checks # that some log output was captured, so we have to let it capture something get_logger("root").warning("irrelevant warning so assertLogs passes") assert not re.search(r"streamlit run", "".join(logs.output)) def test_public_api(self): """Test that we don't accidentally remove (or add) symbols to the public `DeltaGenerator` API. """ api = { name for name, _ in inspect.getmembers(DeltaGenerator) if not name.startswith("_") } expected_api = ELEMENT_COMMANDS.copy() # Remove commands that are only exposed in the top-level namespace (st.*) # and cannot be called on a DeltaGenerator object. expected_api = expected_api - { "spinner", "dialog", "echo", "logo", "login", "logout", } # Add public commands that only exist in the delta generator: expected_api = expected_api.union({"add_rows", "id", "dg"}) assert api == expected_api
RunWarningTest
python
pandas-dev__pandas
asv_bench/benchmarks/groupby.py
{ "start": 5986, "end": 6330 }
class ____: def setup(self): rng = date_range("1/1/2000", "12/31/2005", freq="h") self.year, self.month, self.day = rng.year, rng.month, rng.day self.ts = Series(np.random.randn(len(rng)), index=rng) def time_len_groupby_object(self): len(self.ts.groupby([self.year, self.month, self.day]))
DateAttributes
python
google__pytype
pytype/tools/traces/source_test.py
{ "start": 3058, "end": 3989 }
class ____(unittest.TestCase): def test_one_line(self): src = source.Code("foo.bar", [], _FakeTrace, "") self.assertEqual(src.get_attr_location("foo.bar", source.Location(1, 0)), (source.Location(1, 4), 3)) def test_value_dot(self): src = source.Code("foo.\nbar", [], _FakeTrace, "") self.assertEqual(src.get_attr_location("foo.bar", source.Location(1, 0)), (source.Location(2, 0), 3)) def test_dot_attr(self): src = source.Code("foo\n.bar", [], _FakeTrace, "") self.assertEqual(src.get_attr_location("foo.bar", source.Location(1, 0)), (source.Location(2, 1), 3)) def test_not_found(self): src = source.Code("foo.bar", [], _FakeTrace, "") self.assertEqual(src.get_attr_location("foo.baz", source.Location(1, 0)), (source.Location(1, 0), 7)) if __name__ == "__main__": unittest.main()
GetAttrLocationTest
python
huggingface__transformers
tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py
{ "start": 8067, "end": 9290 }
class ____(unittest.TestCase): @cached_property def default_image_processor(self): return ( MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expectations = Expectations( { (None, None): [-4.1739, -1.1233, 3.1205], ("cuda", 8): [-4.1739, -1.1233, 3.1205], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
MobileNetV1ModelIntegrationTest
python
pytorch__pytorch
test/test_varlen_attention.py
{ "start": 672, "end": 1014 }
class ____(TorchDispatchMode): """Logging mode that captures all dispatched operations""" def __init__(self): self.called_ops = [] def __torch_dispatch__(self, func, types, args=(), kwargs=None): op_name = str(func) self.called_ops.append(op_name) return func(*args, **(kwargs or {}))
OpLoggingMode
python
pypa__warehouse
tests/unit/admin/views/test_users.py
{ "start": 62148, "end": 66657 }
class ____: def test_quarantines_user_projects(self, db_request): user = UserFactory.create() project1 = ProjectFactory.create() project2 = ProjectFactory.create() RoleFactory(project=project1, user=user, role_name="Owner") RoleFactory(project=project2, user=user, role_name="Maintainer") db_request.matchdict["username"] = str(user.username) db_request.params = {"username": user.username} db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar") db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.user = UserFactory.create() result = views.user_quarantine_projects(user, db_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/foobar" assert db_request.session.flash.calls == [ pretend.call( f"Quarantined 2 project(s) for user {user.username!r}", queue="success", ) ] assert project1.lifecycle_status == "quarantine-enter" assert project2.lifecycle_status == "quarantine-enter" def test_quarantines_user_projects_skips_already_quarantined(self, db_request): user = UserFactory.create() project1 = ProjectFactory.create(lifecycle_status="quarantine-enter") project2 = ProjectFactory.create() RoleFactory(project=project1, user=user, role_name="Owner") RoleFactory(project=project2, user=user, role_name="Maintainer") db_request.matchdict["username"] = str(user.username) db_request.params = {"username": user.username} db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar") db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.user = UserFactory.create() result = views.user_quarantine_projects(user, db_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/foobar" assert db_request.session.flash.calls == [ pretend.call( f"Quarantined 1 project(s) for user {user.username!r}", queue="success", ) ] assert project1.lifecycle_status == "quarantine-enter" assert project2.lifecycle_status == 
"quarantine-enter" def test_quarantines_user_projects_no_projects_to_quarantine(self, db_request): user = UserFactory.create() project1 = ProjectFactory.create(lifecycle_status="quarantine-enter") project2 = ProjectFactory.create(lifecycle_status="quarantine-enter") RoleFactory(project=project1, user=user, role_name="Owner") RoleFactory(project=project2, user=user, role_name="Maintainer") db_request.matchdict["username"] = str(user.username) db_request.params = {"username": user.username} db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar") db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.user = UserFactory.create() result = views.user_quarantine_projects(user, db_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/foobar" assert db_request.session.flash.calls == [ pretend.call( f"No projects needed quarantining for user {user.username!r}", queue="info", ) ] def test_quarantine_user_projects_bad_confirm(self, db_request): user = UserFactory.create() project = ProjectFactory.create() RoleFactory(project=project, user=user, role_name="Owner") db_request.matchdict["username"] = str(user.username) db_request.params = {"username": "wrong"} db_request.route_path = pretend.call_recorder(lambda a, **k: "/foobar") db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) result = views.user_quarantine_projects(user, db_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/foobar" assert db_request.session.flash.calls == [ pretend.call("Wrong confirmation input", queue="error") ] assert project.lifecycle_status is None
TestUserQuarantineProjects
python
Netflix__metaflow
metaflow/_vendor/click/exceptions.py
{ "start": 6459, "end": 6930 }
class ____(UsageError): """Raised if an option is generally supplied but the use of the option was incorrect. This is for instance raised if the number of arguments for an option is not correct. .. versionadded:: 4.0 :param option_name: the name of the option being used incorrectly. """ def __init__(self, option_name, message, ctx=None): UsageError.__init__(self, message, ctx) self.option_name = option_name
BadOptionUsage
python
python-excel__xlwt
xlwt/Cell.py
{ "start": 4761, "end": 5306 }
class ____(object): __slots__ = ["rowx", "colx", "xf_idx", "number"] def __init__(self, rowx, colx, xf_idx, error_string_or_code): self.rowx = rowx self.colx = colx self.xf_idx = xf_idx try: self.number = error_code_map[error_string_or_code] except KeyError: raise Exception('Illegal error value (%r)' % error_string_or_code) def get_biff_data(self): return BIFFRecords.BoolErrRecord(self.rowx, self.colx, self.xf_idx, self.number, 1).get()
ErrorCell
python
pyca__cryptography
tests/hazmat/primitives/test_ec.py
{ "start": 2747, "end": 7177 }
class ____(ec.EllipticCurveSignatureAlgorithm): algorithm = hashes.SHA256() def test_skip_curve_unsupported(backend): with pytest.raises(pytest.skip.Exception): _skip_curve_unsupported(backend, DummyCurve()) def test_skip_exchange_algorithm_unsupported(backend): with pytest.raises(pytest.skip.Exception): _skip_exchange_algorithm_unsupported(backend, ec.ECDH(), DummyCurve()) def test_skip_ecdsa_vector(backend): with pytest.raises(pytest.skip.Exception): _skip_ecdsa_vector(backend, DummyCurve(), hashes.SHA256) def test_derive_private_key_success(backend): curve = ec.SECP256K1() _skip_curve_unsupported(backend, curve) private_numbers = ec.generate_private_key(curve, backend).private_numbers() derived_key = ec.derive_private_key( private_numbers.private_value, curve, backend ) assert private_numbers == derived_key.private_numbers() def test_derive_private_key_errors(backend): curve = ec.SECP256K1() _skip_curve_unsupported(backend, curve) with pytest.raises(TypeError): ec.derive_private_key("one", curve, backend) # type: ignore[arg-type] with pytest.raises(TypeError): ec.derive_private_key(10, "five", backend) # type: ignore[arg-type] with pytest.raises(ValueError): ec.derive_private_key(-7, curve, backend) def test_derive_point_at_infinity(backend): curve = ec.SECP256R1() _skip_curve_unsupported(backend, curve) # order of the curve q = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551 # BoringSSL rejects infinity points before it ever gets to us, so it # uses a more generic error message. 
match = ( "infinity" if not ( rust_openssl.CRYPTOGRAPHY_IS_BORINGSSL or rust_openssl.CRYPTOGRAPHY_IS_AWSLC ) else "Invalid" ) with pytest.raises(ValueError, match=match): ec.derive_private_key(q, ec.SECP256R1()) def test_derive_point_invalid_key(backend): curve = ec.SECP256R1() _skip_curve_unsupported(backend, curve) with pytest.raises(ValueError): ec.derive_private_key( 0xE2563328DFABF68188606B91324281C1D58A4456431B09D510B35FECC9F307CA1822846FA2671371A9A81BAC0E35749D, curve, ) def test_ec_numbers(): numbers = ec.EllipticCurvePrivateNumbers( 1, ec.EllipticCurvePublicNumbers(2, 3, DummyCurve()) ) assert numbers.private_value == 1 assert numbers.public_numbers.x == 2 assert numbers.public_numbers.y == 3 assert isinstance(numbers.public_numbers.curve, DummyCurve) @pytest.mark.parametrize( ("private_value", "x", "y", "curve"), [ (None, 2, 3, DummyCurve()), (1, None, 3, DummyCurve()), (1, 2, None, DummyCurve()), (1, 2, 3, None), ], ) def test_invalid_ec_numbers_args(private_value, x, y, curve): with pytest.raises(TypeError): ec.EllipticCurvePrivateNumbers( private_value, ec.EllipticCurvePublicNumbers(x, y, curve) ) def test_invalid_private_numbers_public_numbers(): with pytest.raises(TypeError): ec.EllipticCurvePrivateNumbers(1, None) # type: ignore[arg-type] def test_ec_public_numbers_repr(): pn = ec.EllipticCurvePublicNumbers(2, 3, ec.SECP256R1()) assert ( repr(pn) == "<EllipticCurvePublicNumbers(curve=secp256r1, x=2, y=3)>" ) def test_ec_public_numbers_hash(): pn1 = ec.EllipticCurvePublicNumbers(2, 3, ec.SECP256R1()) pn2 = ec.EllipticCurvePublicNumbers(2, 3, ec.SECP256R1()) pn3 = ec.EllipticCurvePublicNumbers(1, 3, ec.SECP256R1()) assert hash(pn1) == hash(pn2) assert hash(pn1) != hash(pn3) def test_ec_private_numbers_hash(): numbers1 = ec.EllipticCurvePrivateNumbers( 1, ec.EllipticCurvePublicNumbers(2, 3, DummyCurve()) ) numbers2 = ec.EllipticCurvePrivateNumbers( 1, ec.EllipticCurvePublicNumbers(2, 3, DummyCurve()) ) numbers3 = ec.EllipticCurvePrivateNumbers( 2, 
ec.EllipticCurvePublicNumbers(2, 3, DummyCurve()) ) assert hash(numbers1) == hash(numbers2) assert hash(numbers1) != hash(numbers3) def test_ec_key_key_size(backend): curve = ec.SECP256R1() _skip_curve_unsupported(backend, curve) key = ec.generate_private_key(curve, backend) assert key.key_size == 256 assert key.public_key().key_size == 256
DummySignatureAlgorithm
python
cython__cython
Cython/Debugger/libpython.py
{ "start": 37389, "end": 38593 }
class ____(PyObjectPtr): _typename = 'PyTupleObject' def __getitem__(self, i): # Get the gdb.Value for the (PyObject*) with the given index: field_ob_item = self.field('ob_item') return field_ob_item[i] def proxyval(self, visited): # Guard against infinite loops: if self.as_address() in visited: return ProxyAlreadyVisited('(...)') visited.add(self.as_address()) result = tuple(PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited) for i in safe_range(int_from_int(self.field('ob_size')))) return result def write_repr(self, out, visited): # Guard against infinite loops: if self.as_address() in visited: out.write('(...)') return visited.add(self.as_address()) out.write('(') for i in safe_range(int_from_int(self.field('ob_size'))): if i > 0: out.write(', ') element = PyObjectPtr.from_pyobject_ptr(self[i]) element.write_repr(out, visited) if self.field('ob_size') == 1: out.write(',)') else: out.write(')')
PyTupleObjectPtr
python
pytorch__pytorch
test/torch_np/numpy_tests/lib/test_function_base.py
{ "start": 85756, "end": 89805 }
class ____(TestCase): def test_simple(self): y = np.bincount(np.arange(4)) assert_array_equal(y, np.ones(4)) def test_simple2(self): y = np.bincount(np.array([1, 5, 2, 4, 1])) assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) def test_simple_weight(self): x = np.arange(4) w = np.array([0.2, 0.3, 0.5, 0.1]) y = np.bincount(x, w) assert_array_equal(y, w) def test_simple_weight2(self): x = np.array([1, 2, 4, 5, 2]) w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) y = np.bincount(x, w) assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) def test_with_minlength(self): x = np.array([0, 1, 0, 1, 1]) y = np.bincount(x, minlength=3) assert_array_equal(y, np.array([2, 3, 0])) x = [] y = np.bincount(x, minlength=0) assert_array_equal(y, np.array([])) def test_with_minlength_smaller_than_maxvalue(self): x = np.array([0, 1, 1, 2, 2, 3, 3]) y = np.bincount(x, minlength=2) assert_array_equal(y, np.array([1, 2, 2, 2])) y = np.bincount(x, minlength=0) assert_array_equal(y, np.array([1, 2, 2, 2])) def test_with_minlength_and_weights(self): x = np.array([1, 2, 4, 5, 2]) w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) y = np.bincount(x, w, 8) assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) def test_empty(self): x = np.array([], dtype=int) y = np.bincount(x) assert_array_equal(x, y) def test_empty_with_minlength(self): x = np.array([], dtype=int) y = np.bincount(x, minlength=5) assert_array_equal(y, np.zeros(5, dtype=int)) def test_with_incorrect_minlength(self): x = np.array([], dtype=int) assert_raises( TypeError, # "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar"), ) assert_raises( (ValueError, RuntimeError), # "must not be negative", lambda: np.bincount(x, minlength=-1), ) x = np.arange(5) assert_raises( TypeError, # "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar"), ) assert_raises( (ValueError, RuntimeError), # "must not be negative", lambda: np.bincount(x, minlength=-1), ) @skipIfTorchDynamo() # flaky test 
@skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_dtype_reference_leaks(self): # gh-6805 intp_refcount = sys.getrefcount(np.dtype(np.intp)) double_refcount = sys.getrefcount(np.dtype(np.double)) for _ in range(10): np.bincount([1, 2, 3]) assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) for _ in range(10): np.bincount([1, 2, 3], [4, 5, 6]) assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) @parametrize("vals", [[[2, 2]], 2]) def test_error_not_1d(self, vals): # Test that values has to be 1-D (both as array and nested list) vals_arr = np.asarray(vals) with assert_raises((ValueError, RuntimeError)): np.bincount(vals_arr) with assert_raises((ValueError, RuntimeError)): np.bincount(vals) parametrize_interp_sc = parametrize( "sc", [ subtest(lambda x: np.float64(x), name="real"), subtest(lambda x: _make_complex(x, 0), name="complex-real"), subtest(lambda x: _make_complex(0, x), name="complex-imag"), subtest(lambda x: _make_complex(x, np.multiply(x, -2)), name="complex-both"), ], ) @xpassIfTorchDynamo_np # (reason="TODO: implement") @instantiate_parametrized_tests
TestBincount
python
pypa__pip
src/pip/_vendor/distlib/util.py
{ "start": 54173, "end": 54523 }
class ____(object): defaults = { 'delimiter': str(','), # The strs are used because we need native 'quotechar': str('"'), # str in the csv API (2.x won't take 'lineterminator': str('\n') # Unicode) } def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close()
CSVBase
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py
{ "start": 3956, "end": 6265 }
class ____(OrganizationDetectorDetailsBaseTest): def test_simple(self) -> None: response = self.get_success_response(self.organization.slug, self.detector.id) assert response.data == serialize(self.detector) def test_does_not_exist(self) -> None: self.get_error_response(self.organization.slug, 3, status_code=404) def test_malformed_id(self) -> None: from django.urls import reverse # get_error_response can't generate an invalid URL, so we have to # generate a correct one and replace the valid ID with an invalid one. good_url = reverse(self.endpoint, args=[self.organization.slug, 7654]) bad_url = good_url.replace("7654", "not-an-id") assert_status_code(self.client.get(bad_url), 404) def test_pending_deletion(self) -> None: detector = self.create_detector() detector.status = ObjectStatus.PENDING_DELETION detector.save() self.get_error_response(self.organization.slug, detector.id, status_code=404) def test_with_alert_rule_mapping(self) -> None: # Create a metric alert rule mapping metric_alert_id = 12345 AlertRuleDetector.objects.create(alert_rule_id=metric_alert_id, detector=self.detector) response = self.get_success_response(self.organization.slug, self.detector.id) assert response.data["alertRuleId"] == metric_alert_id assert response.data["ruleId"] is None def test_with_issue_rule_mapping(self) -> None: # Create an issue alert rule mapping issue_rule_id = 67890 AlertRuleDetector.objects.create(rule_id=issue_rule_id, detector=self.detector) response = self.get_success_response(self.organization.slug, self.detector.id) assert response.data["ruleId"] == issue_rule_id assert response.data["alertRuleId"] is None def test_without_alert_rule_mapping(self) -> None: """Test that alertRuleId and ruleId are null when no mapping exists""" response = self.get_success_response(self.organization.slug, self.detector.id) # Verify the mapping fields are null when no mapping exists assert response.data["alertRuleId"] is None assert response.data["ruleId"] is None @region_silo_test
OrganizationDetectorDetailsGetTest
python
huggingface__transformers
src/transformers/models/sam_hq/modeling_sam_hq.py
{ "start": 21426, "end": 22320 }
class ____(nn.Module): def __init__(self, config: SamHQVisionConfig): super().__init__() self.config = config self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False) self.layer_norm1 = SamHQLayerNorm(config.output_channels, data_format="channels_first") self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False) self.layer_norm2 = SamHQLayerNorm(config.output_channels, data_format="channels_first") def forward(self, hidden_states): hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) return hidden_states
SamHQVisionNeck
python
google__pytype
pytype/tests/test_pattern_matching.py
{ "start": 38167, "end": 44569 }
class ____(test_base.BaseTest): """Test exhaustive coverage of literals.""" def test_exhaustive(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case "b" | "c": return 'a' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"]) -> int | str: ... """, ) def test_default(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case _: return 'a' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"]) -> int | str: ... """, ) def test_default_with_capture(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case _ as foo: return foo """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"]) -> int | str: ... """, ) def test_nonexhaustive(self): ty, err = self.InferWithErrors(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: # incomplete-match[e] case "a": return 10 case "b": return 'a' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"]) -> int | str | None: ... """, ) self.assertErrorSequences(err, {"e": ["missing", "cases", "c"]}) def test_unused_after_exhaustive(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case "b" | "c": return 20 case _: return 'a' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"]) -> int: ... 
""", ) def test_nested(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]): match x: case "a": return 10 case "b" | "c": match y: case "a": return 10 case "b": return 'a' case _: return None """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]) -> int | str | None: ... """, ) def test_multiple(self): ty, _ = self.InferWithErrors(""" from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]): match x: # incomplete-match case "a": return 10 case "b": return 20 match y: case "a": return 'a' case "b" | "c": return 'b' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]) -> int | str: ... """, ) def test_redundant(self): ty, _ = self.InferWithErrors(""" from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]): match x: case "a": return 10 case "b": return 20 case "a": # redundant-match return '10' case "c": return 20 """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]) -> int: ... """, ) def test_incomplete_and_redundant(self): ty, _ = self.InferWithErrors(""" from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]): match x: # incomplete-match case "a": return 10 case "b": return 20 case "a": # redundant-match return '10' """) self.assertTypesMatchPytd( ty, """ from typing import Literal def f(x: Literal["a", "b", "c"], y: Literal["a", "b", "c"]) -> int | None: ... 
""", ) def test_partially_redundant(self): err = self.CheckWithErrors(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case "b": return 20 case "a" | "c": # redundant-match[e] return '10' """) self.assertErrorSequences(err, {"e": ["already been covered", "a"]}) def call_function_with_match(self): ty = self.Infer(""" from typing import Literal def f(x: Literal["a", "b", "c"]): match x: case "a": return 10 case "b" | "c": return 'a' a = f("a") """) self.assertTypesMatchPytd( ty, """ from typing import Literal a: int | str def f(x: Literal["a", "b", "c"]) -> int | str: ... """, ) def test_literal_vs_indefinite_value(self): # Regression test for a false positive in a corner case with self.DepTree([( "foo.py", """ import enum class Color(enum.Enum): RED = 'red' GREEN = 'green' BLUE = 'blue' OCTARINE: str """, )]): self.Check(""" from typing import Literal import foo Keys = Literal['red', 'green', 'blue'] def f(x: Keys): match x: case foo.Color.RED.value: return 10 case foo.Color.GREEN.value: return 20 case foo.Color.OCTARINE.value: return 80 # We do not check redundant or incomplete matches here because # the nonspecific `str` value means we no longer know exactly which # cases have been covered. """) if __name__ == "__main__": test_base.main()
LiteralMatchCoverageTest
python
kennethreitz__tablib
src/tablib/core.py
{ "start": 692, "end": 2253 }
class ____: """Internal Row object. Mainly used for filtering.""" __slots__ = ['_row', 'tags'] def __init__(self, row=list(), tags=list()): self._row = list(row) self.tags = list(tags) def __iter__(self): return (col for col in self._row) def __len__(self): return len(self._row) def __repr__(self): return repr(self._row) def __getitem__(self, i): return self._row[i] def __setitem__(self, i, value): self._row[i] = value def __delitem__(self, i): del self._row[i] def __getstate__(self): return self._row, self.tags def __setstate__(self, state): self._row, self.tags = state def rpush(self, value): self.insert(len(self._row), value) def lpush(self, value): self.insert(0, value) def append(self, value): self.rpush(value) def insert(self, index, value): self._row.insert(index, value) def __contains__(self, item): return (item in self._row) @property def tuple(self): """Tuple representation of :class:`Row`.""" return tuple(self._row) @property def list(self): """List representation of :class:`Row`.""" return list(self._row) def has_tag(self, tag): """Returns true if current row contains tag.""" if tag is None: return False elif isinstance(tag, str): return (tag in self.tags) else: return bool(len(set(tag) & set(self.tags)))
Row
python
dask__distributed
distributed/http/scheduler/info.py
{ "start": 4076, "end": 4672 }
class ____(RequestHandler): @log_errors async def get(self, worker): worker = escape.url_unescape(worker) try: logs = await self.server.get_worker_logs(workers=[worker]) except Exception: if not any(worker == w.address for w in self.server.workers.values()): self.send_error(404) return raise logs = logs[worker] self.render( "logs.html", title="Logs: " + worker, logs=logs, **merge(self.extra, rel_path_statics), )
WorkerLogs
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 77429, "end": 78414 }
class ____(ASTBase): def __init__(self, expr: ASTExpression | None) -> None: self.expr = expr def __eq__(self, other: object) -> bool: if not isinstance(other, ASTExplicitSpec): return NotImplemented return self.expr == other.expr def __hash__(self) -> int: return hash(self.expr) def _stringify(self, transform: StringifyTransform) -> str: res = ['explicit'] if self.expr is not None: res.extend(('(', transform(self.expr), ')')) return ''.join(res) def describe_signature( self, signode: TextElement, env: BuildEnvironment, symbol: Symbol ) -> None: signode += addnodes.desc_sig_keyword('explicit', 'explicit') if self.expr is not None: signode += addnodes.desc_sig_punctuation('(', '(') self.expr.describe_signature(signode, 'markType', env, symbol) signode += addnodes.desc_sig_punctuation(')', ')')
ASTExplicitSpec
python
zarr-developers__zarr-python
tests/test_indexing.py
{ "start": 67199, "end": 71414 }
class ____: @pytest.mark.parametrize( ("indexer", "expected"), [ # int ((0,), np.array([1, 2])), ((1,), np.array([3, 4])), ((0, 1), np.array(2)), # slice ((slice(None),), np.array([[1, 2], [3, 4]])), ((slice(0, 1),), np.array([[1, 2]])), ((slice(1, 2),), np.array([[3, 4]])), ((slice(0, 2),), np.array([[1, 2], [3, 4]])), ((slice(0, 0),), np.empty(shape=(0, 2), dtype="i8")), # ellipsis ((...,), np.array([[1, 2], [3, 4]])), ((0, ...), np.array([1, 2])), ((..., 0), np.array([1, 3])), ((0, 1, ...), np.array(2)), # combined ((0, slice(None)), np.array([1, 2])), ((slice(None), 0), np.array([1, 3])), ((slice(None), slice(None)), np.array([[1, 2], [3, 4]])), # array of ints (([0]), np.array([[1, 2]])), (([1]), np.array([[3, 4]])), (([0], [1]), np.array(2)), (([0, 1], [0]), np.array([[1], [3]])), (([0, 1], [0, 1]), np.array([[1, 2], [3, 4]])), # boolean array (np.array([True, True]), np.array([[1, 2], [3, 4]])), (np.array([True, False]), np.array([[1, 2]])), (np.array([False, True]), np.array([[3, 4]])), (np.array([False, False]), np.empty(shape=(0, 2), dtype="i8")), ], ) @pytest.mark.asyncio async def test_async_oindex(self, store, indexer, expected): z = zarr.create_array(store=store, shape=(2, 2), chunks=(1, 1), zarr_format=3, dtype="i8") z[...] = np.array([[1, 2], [3, 4]]) async_zarr = z._async_array result = await async_zarr.oindex.getitem(indexer) assert_array_equal(result, expected) @pytest.mark.asyncio async def test_async_oindex_with_zarr_array(self, store): group = zarr.create_group(store=store, zarr_format=3) z1 = group.create_array(name="z1", shape=(2, 2), chunks=(1, 1), dtype="i8") z1[...] = np.array([[1, 2], [3, 4]]) async_zarr = z1._async_array # create boolean zarr array to index with z2 = group.create_array(name="z2", shape=(2,), chunks=(1,), dtype="?") z2[...] 
= np.array([True, False]) result = await async_zarr.oindex.getitem(z2) expected = np.array([[1, 2]]) assert_array_equal(result, expected) @pytest.mark.parametrize( ("indexer", "expected"), [ (([0], [0]), np.array(1)), (([0, 1], [0, 1]), np.array([1, 4])), (np.array([[False, True], [False, True]]), np.array([2, 4])), ], ) @pytest.mark.asyncio async def test_async_vindex(self, store, indexer, expected): z = zarr.create_array(store=store, shape=(2, 2), chunks=(1, 1), zarr_format=3, dtype="i8") z[...] = np.array([[1, 2], [3, 4]]) async_zarr = z._async_array result = await async_zarr.vindex.getitem(indexer) assert_array_equal(result, expected) @pytest.mark.asyncio async def test_async_vindex_with_zarr_array(self, store): group = zarr.create_group(store=store, zarr_format=3) z1 = group.create_array(name="z1", shape=(2, 2), chunks=(1, 1), dtype="i8") z1[...] = np.array([[1, 2], [3, 4]]) async_zarr = z1._async_array # create boolean zarr array to index with z2 = group.create_array(name="z2", shape=(2, 2), chunks=(1, 1), dtype="?") z2[...] = np.array([[False, True], [False, True]]) result = await async_zarr.vindex.getitem(z2) expected = np.array([2, 4]) assert_array_equal(result, expected) @pytest.mark.asyncio async def test_async_invalid_indexer(self, store): z = zarr.create_array(store=store, shape=(2, 2), chunks=(1, 1), zarr_format=3, dtype="i8") z[...] = np.array([[1, 2], [3, 4]]) async_zarr = z._async_array with pytest.raises(IndexError): await async_zarr.vindex.getitem("invalid_indexer") with pytest.raises(IndexError): await async_zarr.oindex.getitem("invalid_indexer")
TestAsync
python
pandas-dev__pandas
asv_bench/benchmarks/io/csv.py
{ "start": 8237, "end": 8687 }
class ____(StringIORewind): params = (["nan", "0", ""],) param_names = ["bad_date_value"] def setup(self, bad_date_value): self.StringIO_input = StringIO((f"{bad_date_value},\n") * 50000) def time_read_csv(self, bad_date_value): read_csv( self.data(self.StringIO_input), header=None, names=["foo", "bar"], parse_dates=["foo"], )
ReadCSVConcatDatetimeBadDateValue
python
ray-project__ray
rllib/examples/learners/classes/vpg_torch_learner.py
{ "start": 547, "end": 2835 }
class ____(TorchLearner): @override(TorchLearner) def build(self) -> None: super().build() # Prepend the returns-to-go connector piece to have that information # available in the train batch. if self.config.add_default_connectors_to_learner_pipeline: self._learner_connector.prepend(ComputeReturnsToGo(gamma=self.config.gamma)) @override(TorchLearner) def compute_loss_for_module( self, *, module_id: ModuleID, config: "AlgorithmConfig", batch: Dict[str, Any], fwd_out: Dict[str, TensorType], ) -> TensorType: rl_module = self.module[module_id] # Create the action distribution from the parameters output by the RLModule. action_dist_inputs = fwd_out[Columns.ACTION_DIST_INPUTS] action_dist_class = rl_module.get_train_action_dist_cls() action_dist = action_dist_class.from_logits(action_dist_inputs) # Compute log probabilities of the actions taken during sampling. log_probs = action_dist.logp(batch[Columns.ACTIONS]) # Compute the policy gradient loss. # Since we're not using a baseline, we use returns to go directly. loss = -torch.mean(log_probs * batch[Columns.RETURNS_TO_GO]) # Just for exercise, log the average return to go per discrete action. for act, ret_to_go in zip(batch[Columns.ACTIONS], batch[Columns.RETURNS_TO_GO]): self.metrics.log_value( key=(module_id, f"action_{act}_return_to_go_mean"), value=ret_to_go, reduce="mean", clear_on_reduce=True, ) return loss @override(Learner) def after_gradient_based_update(self, *, timesteps): # This is to check if in the multi-gpu case, the weights across workers are # the same. Only for testing purposes. if self.config.report_mean_weights: for module_id in self.module.keys(): parameters = convert_to_numpy( self.get_parameters(self.module[module_id]) ) mean_ws = np.mean([w.mean() for w in parameters]) self.metrics.log_value((module_id, "mean_weight"), mean_ws, window=1)
VPGTorchLearner
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py
{ "start": 3690, "end": 3860 }
class ____(ParentD): def f(self): builtins.super(ChildD3, self).f() super # Python injects __class__ into scope import builtins as builtins_alias
ChildD3
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol52.py
{ "start": 277, "end": 333 }
class ____[**P, T](Protocol): x: Callable[P, T]
ProtoB
python
realpython__materials
python-all-attribute/shapes_v1.py
{ "start": 174, "end": 454 }
class ____: def __init__(self, side): self.side = _validate(side) def area(self): return self.side**2 def _validate(value): if not isinstance(value, int | float) or value <= 0: raise ValueError("positive number expected") return value
Square
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec49.py
{ "start": 246, "end": 454 }
class ____: def dispatch( self, task_declaration: TaskDeclaration[P], count: int, /, *args: P.args, **kwargs: P.kwargs, ) -> None: pass
Dispatcher
python
PrefectHQ__prefect
src/prefect/events/schemas/automations.py
{ "start": 15693, "end": 15802 }
class ____(AutomationCore): id: UUID = Field(default=..., description="The ID of this automation")
Automation
python
huggingface__transformers
tests/models/modernbert/test_modeling_modernbert.py
{ "start": 1605, "end": 9443 }
class ____: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, pad_token_id=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_activation="gelu", mlp_dropout=0.0, attention_dropout=0.0, embedding_dropout=0.0, classifier_dropout=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_activation = hidden_activation self.mlp_dropout = mlp_dropout self.attention_dropout = attention_dropout self.embedding_dropout = embedding_dropout self.classifier_dropout = classifier_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, 
token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ config = ModernBertConfig( vocab_size=self.vocab_size, pad_token_id=self.pad_token_id, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_activation=self.hidden_activation, mlp_dropout=self.mlp_dropout, attention_dropout=self.attention_dropout, embedding_dropout=self.embedding_dropout, classifier_dropout=self.classifier_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) if test := os.environ.get("PYTEST_CURRENT_TEST", None): test_name = test.split(":")[-1].split(" ")[0] # If we're testing `test_retain_grad_hidden_states_attentions`, we normally get an error # that compilation doesn't work. Users can then set compile=False when loading the model, # much like here. We're testing whether it works once they've done that. 
# If we're testing `test_inputs_embeds_matches_input_ids`, then we'd like to test with `reference_compile` # set to False, otherwise the input_ids with compiled input embeddings will not match the inputs_embeds # with atol=1e-8 and rtol=1e-5 if test_name in ("test_retain_grad_hidden_states_attentions", "test_inputs_embeds_matches_input_ids"): config.reference_compile = False # Some tests require attentions to be outputted, in that case we'll set the attention implementation to eager # as the others don't support outputted attentions if test_name in ( "test_attention_outputs", "test_hidden_states_output", "test_retain_grad_hidden_states_attentions", ): config._attn_implementation = "eager" return config def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = ModernBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = ModernBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, input_mask, 
sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ModernBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
ModernBertModelTester
python
openai__openai-python
src/openai/types/realtime/realtime_transcription_session_create_response.py
{ "start": 1551, "end": 1616 }
class ____(BaseModel): input: Optional[AudioInput] = None
Audio
python
sympy__sympy
sympy/stats/frv.py
{ "start": 6513, "end": 11036 }
class ____(PSpace): """ A Finite Probability Space Represents the probabilities of a finite number of events. """ is_Finite = True def __new__(cls, domain, density): density = {sympify(key): sympify(val) for key, val in density.items()} public_density = Dict(density) obj = PSpace.__new__(cls, domain, public_density) obj._density = density return obj def prob_of(self, elem): elem = sympify(elem) density = self._density if isinstance(list(density.keys())[0], FiniteSet): return density.get(elem, S.Zero) return density.get(tuple(elem)[0][1], S.Zero) def where(self, condition): assert all(r.symbol in self.symbols for r in random_symbols(condition)) return ConditionalFiniteDomain(self.domain, condition) def compute_density(self, expr): expr = rv_subs(expr, self.values) d = FiniteDensity() for elem in self.domain: val = expr.xreplace(dict(elem)) prob = self.prob_of(elem) d[val] = d.get(val, S.Zero) + prob return d @cacheit def compute_cdf(self, expr): d = self.compute_density(expr) cum_prob = S.Zero cdf = [] for key in sorted(d): prob = d[key] cum_prob += prob cdf.append((key, cum_prob)) return dict(cdf) @cacheit def sorted_cdf(self, expr, python_float=False): cdf = self.compute_cdf(expr) items = list(cdf.items()) sorted_items = sorted(items, key=lambda val_cumprob: val_cumprob[1]) if python_float: sorted_items = [(v, float(cum_prob)) for v, cum_prob in sorted_items] return sorted_items @cacheit def compute_characteristic_function(self, expr): d = self.compute_density(expr) t = Dummy('t', real=True) return Lambda(t, sum(exp(I*k*t)*v for k,v in d.items())) @cacheit def compute_moment_generating_function(self, expr): d = self.compute_density(expr) t = Dummy('t', real=True) return Lambda(t, sum(exp(k*t)*v for k,v in d.items())) def compute_expectation(self, expr, rvs=None, **kwargs): rvs = rvs or self.values expr = rv_subs(expr, rvs) probs = [self.prob_of(elem) for elem in self.domain] if isinstance(expr, (Logic, Relational)): parse_domain = [tuple(elem)[0][1] for elem in 
self.domain] bools = [expr.xreplace(dict(elem)) for elem in self.domain] else: parse_domain = [expr.xreplace(dict(elem)) for elem in self.domain] bools = [True for elem in self.domain] return sum(Piecewise((prob * elem, blv), (S.Zero, True)) for prob, elem, blv in zip(probs, parse_domain, bools)) def compute_quantile(self, expr): cdf = self.compute_cdf(expr) p = Dummy('p', real=True) set = ((nan, (p < 0) | (p > 1)),) for key, value in cdf.items(): set = set + ((key, p <= value), ) return Lambda(p, Piecewise(*set)) def probability(self, condition): cond_symbols = frozenset(rs.symbol for rs in random_symbols(condition)) cond = rv_subs(condition) if not cond_symbols.issubset(self.symbols): raise ValueError("Cannot compare foreign random symbols, %s" %(str(cond_symbols - self.symbols))) if isinstance(condition, Relational) and \ (not cond.free_symbols.issubset(self.domain.free_symbols)): rv = condition.lhs if isinstance(condition.rhs, Symbol) else condition.rhs return sum(Piecewise( (self.prob_of(elem), condition.subs(rv, list(elem)[0][1])), (S.Zero, True)) for elem in self.domain) return sympify(sum(self.prob_of(elem) for elem in self.where(condition))) def conditional_space(self, condition): domain = self.where(condition) prob = self.probability(condition) density = {key: val / prob for key, val in self._density.items() if domain._test(key)} return FinitePSpace(domain, density) def sample(self, size=(), library='scipy', seed=None): """ Internal sample method Returns dictionary mapping RandomSymbol to realization value. """ return {self.value: self.distribution.sample(size, library, seed)}
FinitePSpace
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_vision.py
{ "start": 34451, "end": 35574 }
class ____(nn.Module): def __init__(self, config: Data2VecVisionConfig) -> None: super().__init__() self.layernorm = ( nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.layernorm is not None: # Mean pool the final hidden states of the patch tokens patch_tokens = hidden_states[:, 1:, :] pooled_output = self.layernorm(patch_tokens.mean(1)) else: # Pool by simply taking the final hidden state of the [CLS] token pooled_output = hidden_states[:, 0] return pooled_output @auto_docstring( custom_intro=""" Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of the final hidden states of the patch tokens) e.g. for ImageNet. """ ) # Copied from transformers.models.beit.modeling_beit.BeitForImageClassification with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,beit->data2vec_vision
Data2VecVisionPooler
python
ansible__ansible
lib/ansible/_internal/_errors/_captured.py
{ "start": 678, "end": 3842 }
class ____(AnsibleCapturedError, _error_utils.ContributesToTaskResult): """ An exception representing error detail captured in a foreign context where an action/module result dictionary is involved. This exception provides a result dictionary via the ContributesToTaskResult mixin. """ def __init__(self, event: _messages.Event, result: dict[str, t.Any]) -> None: super().__init__(event=event) self._result = result @property def result_contribution(self) -> _c.Mapping[str, object]: return self._result @classmethod def maybe_raise_on_result(cls, result: dict[str, t.Any]) -> None: """Normalize the result and raise an exception if the result indicated failure.""" if error_summary := cls.normalize_result_exception(result): raise error_summary.error_type(error_summary.event, result) @classmethod def normalize_result_exception(cls, result: dict[str, t.Any]) -> CapturedErrorSummary | None: """ Normalize the result `exception`, if any, to be a `CapturedErrorSummary` instance. If a new `CapturedErrorSummary` was created, the `error_type` will be `cls`. The `exception` key will be removed if falsey. A `CapturedErrorSummary` instance will be returned if `failed` is truthy. """ if type(cls) is AnsibleResultCapturedError: # pylint: disable=unidiomatic-typecheck raise TypeError('The normalize_result_exception method cannot be called on the AnsibleCapturedError base type, use a derived type.') if not isinstance(result, dict): raise TypeError(f'Malformed result. 
Received {type(result)} instead of {dict}.') failed = result.get('failed') # DTFIX-FUTURE: warn if failed is present and not a bool, or exception is present without failed being True exception = result.pop('exception', None) if not failed and not exception: return None if isinstance(exception, CapturedErrorSummary): error_summary = exception elif isinstance(exception, _messages.ErrorSummary): error_summary = CapturedErrorSummary( event=exception.event, error_type=cls, ) else: # translate non-ErrorDetail errors error_summary = CapturedErrorSummary( event=_messages.Event( msg=str(result.get('msg', 'Unknown error.')), formatted_traceback=cls._normalize_traceback(exception), ), error_type=cls, ) result.update(exception=error_summary) return error_summary if failed else None # even though error detail was normalized, only return it if the result indicated failure @classmethod def _normalize_traceback(cls, value: object | None) -> str | None: """Normalize the provided traceback value, returning None if it is falsey.""" if not value: return None value = str(value).rstrip() if not value: return None return value + '\n'
AnsibleResultCapturedError
python
spack__spack
lib/spack/spack/subprocess_context.py
{ "start": 1664, "end": 3306 }
class ____: """Captures the in-memory process state of a package installation that needs to be transmitted to a child process. """ def __init__(self, pkg, *, ctx=None): ctx = ctx or multiprocessing.get_context() self.serialize = ctx.get_start_method() != "fork" from spack.environment import active_environment if self.serialize: self.serialized_pkg = serialize(pkg) self.global_state = GlobalStateMarshaler() self.test_patches = store_patches() self.serialized_env = serialize(active_environment()) else: self.pkg = pkg self.global_state = None self.test_patches = None self.env = active_environment() self.spack_working_dir = spack.paths.spack_working_dir def restore(self): spack.paths.spack_working_dir = self.spack_working_dir # Activating the environment modifies the global configuration, so globals have to # be restored afterward, in case other modifications were applied on top (e.g. from # command line) if self.serialize: self.global_state.restore() self.test_patches.restore() env = pickle.load(self.serialized_env) if self.serialize else self.env if env: from spack.environment import activate activate(env) # Order of operation is important, since the package might be retrieved # from a repo defined within the environment configuration return deserialize(self.serialized_pkg) if self.serialize else self.pkg
PackageInstallContext
python
doocs__leetcode
solution/0400-0499/0416.Partition Equal Subset Sum/Solution.py
{ "start": 0, "end": 415 }
class ____: def canPartition(self, nums: List[int]) -> bool: m, mod = divmod(sum(nums), 2) if mod: return False n = len(nums) f = [[False] * (m + 1) for _ in range(n + 1)] f[0][0] = True for i, x in enumerate(nums, 1): for j in range(m + 1): f[i][j] = f[i - 1][j] or (j >= x and f[i - 1][j - x]) return f[n][m]
Solution
python
numba__numba
numba/core/errors.py
{ "start": 1530, "end": 1698 }
class ____(NumbaWarning, PendingDeprecationWarning): """ Warning category for use of a feature that is pending deprecation. """
NumbaPendingDeprecationWarning
python
openai__openai-python
src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py
{ "start": 1608, "end": 2440 }
class ____(BaseModel): content_index: int """The index of the content part containing the audio.""" event_id: str """The unique ID of the server event.""" item_id: str """The ID of the item containing the audio that is being transcribed.""" transcript: str """The transcribed text.""" type: Literal["conversation.item.input_audio_transcription.completed"] """ The event type, must be `conversation.item.input_audio_transcription.completed`. """ usage: Usage """ Usage statistics for the transcription, this is billed according to the ASR model's pricing rather than the realtime model's pricing. """ logprobs: Optional[List[LogProbProperties]] = None """The log probabilities of the transcription."""
ConversationItemInputAudioTranscriptionCompletedEvent
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 264117, "end": 285001 }
class ____(Response): """ Response of tasks.get_all endpoint. :param tasks: List of tasks :type tasks: Sequence[Task] """ _service = "tasks" _action = "get_all" _version = "2.13" _schema = { "definitions": { "artifact": { "properties": { "content_size": { "description": "Raw data length in bytes", "type": "integer", }, "display_data": { "description": "User-defined list of key/value pairs, sorted", "items": {"items": {"type": "string"}, "type": "array"}, "type": "array", }, "hash": { "description": "Hash of entire raw data", "type": "string", }, "key": {"description": "Entry key", "type": "string"}, "mode": { "$ref": "#/definitions/artifact_mode_enum", "description": "System defined input/output indication", }, "timestamp": { "description": "Epoch time when artifact was created", "type": "integer", }, "type": {"description": "System defined type", "type": "string"}, "type_data": { "$ref": "#/definitions/artifact_type_data", "description": "Additional fields defined by the system", }, "uri": {"description": "Raw data location", "type": "string"}, }, "required": ["key", "type"], "type": "object", }, "artifact_mode_enum": { "default": "output", "enum": ["input", "output"], "type": "string", }, "artifact_type_data": { "properties": { "content_type": { "description": "System defined raw data content type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", }, "configuration_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. Should be unique", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. 
Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "execution": { "properties": { "artifacts": { "description": "Task artifacts", "items": {"$ref": "#/definitions/artifact"}, "type": ["array", "null"], }, "framework": { "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ", "type": ["string", "null"], }, "model_desc": { "additionalProperties": True, "description": "Json object representing the Model descriptors", "type": ["object", "null"], }, "model_labels": { "additionalProperties": {"type": "integer"}, "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks", "type": ["object", "null"], }, "parameters": { "additionalProperties": True, "description": "Json object containing the Task parameters", "type": ["object", "null"], }, "queue": { "description": "Queue ID where task was queued.", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_event": { "properties": { "max_value": { "description": "Maximum value reported", "type": ["number", "null"], }, "metric": { "description": "Metric name", "type": ["string", "null"], }, "min_value": { "description": "Minimum value reported", "type": ["number", "null"], }, "value": { "description": "Last value reported", "type": ["number", "null"], }, "variant": { "description": "Variant name", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_variants": { "additionalProperties": {"$ref": "#/definitions/last_metrics_event"}, "description": "Last metric events, one for each variant hash", "type": "object", }, "output": { "properties": { "destination": { "description": "Storage id. 
This is where output files will be stored.", "type": ["string", "null"], }, "error": { "description": "Last error text", "type": ["string", "null"], }, "model": {"description": "Model id.", "type": ["string", "null"]}, "result": { "description": "Task result. Values: 'success', 'failure'", "type": ["string", "null"], }, }, "type": "object", }, "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "script": { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": "Repository branch id If not provided and tag not provided, default repository branch is used.", "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": { "description": "Repository tag", "type": ["string", "null"], }, "version_num": { "description": "Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.", "type": ["string", "null"], }, "working_dir": { "description": "Path to the folder from which to run the script Default - root folder of repository", "type": ["string", "null"], }, }, "type": "object", }, "section_params": { "additionalProperties": {"$ref": "#/definitions/params_item"}, "description": "Task section params", "type": "object", }, "task": { "properties": { "active_duration": { "description": "Task duration time (seconds)", "type": ["integer", "null"], }, "comment": { "description": "Free text comment", "type": ["string", "null"], }, "company": { "description": "Company ID", "type": ["string", "null"], }, "completed": { "description": "Task end time (UTC)", "format": "date-time", "type": ["string", "null"], }, "configuration": { "additionalProperties": {"$ref": "#/definitions/configuration_item"}, "description": "Task configuration params", "type": ["object", "null"], }, "container": { "type": "object", "description": "Docker container parameters", "additionalProperties": {"type": ["string", "null"]}, }, "created": { "description": "Task creation time (UTC) ", "format": "date-time", "type": ["string", "null"], }, "execution": { "description": "Task execution params", "oneOf": [ {"$ref": "#/definitions/execution"}, {"type": "null"}, ], }, "hyperparams": { "additionalProperties": {"$ref": "#/definitions/section_params"}, "description": "Task hyper params per section", "type": ["object", "null"], }, "id": {"description": "Task id", "type": ["string", "null"]}, "last_change": { "description": "Last time any update was done to the task", "format": "date-time", "type": ["string", "null"], }, "last_iteration": { "description": "Last iteration reported for this task", "type": ["integer", "null"], }, "last_metrics": { "additionalProperties": {"$ref": "#/definitions/last_metrics_variants"}, "description": "Last metric variants (hash to events), one for each metric hash", "type": ["object", "null"], 
}, "last_update": { "description": "Last time this task was created, edited, changed or events for this task were reported", "format": "date-time", "type": ["string", "null"], }, "last_worker": { "description": "ID of last worker that handled the task", "type": ["string", "null"], }, "last_worker_report": { "description": "Last time a worker reported while working on this task", "format": "date-time", "type": ["string", "null"], }, "models": { "description": "Task models", "oneOf": [ {"$ref": "#/definitions/task_models"}, {"type": "null"}, ], }, "name": {"description": "Task Name", "type": ["string", "null"]}, "output": { "description": "Task output params", "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}], }, "parent": { "description": "Parent task id", "type": ["string", "null"], }, "project": { "description": "Project ID of the project to which this task is assigned", "type": ["string", "null"], }, "published": { "description": "Last status change time", "format": "date-time", "type": ["string", "null"], }, "script": { "description": "Script info", "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}], }, "started": { "description": "Task start time (UTC)", "format": "date-time", "type": ["string", "null"], }, "status": { "description": "", "oneOf": [ {"$ref": "#/definitions/task_status_enum"}, {"type": "null"}, ], }, "status_changed": { "description": "Last status change time", "format": "date-time", "type": ["string", "null"], }, "status_message": { "description": "free text string representing info about the status", "type": ["string", "null"], }, "status_reason": { "description": "Reason for last status change", "type": ["string", "null"], }, "system_tags": { "description": "System tags list. 
This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": ["array", "null"], }, "type": { "description": "Type of task. Values: 'training', 'testing'", "oneOf": [ {"$ref": "#/definitions/task_type_enum"}, {"type": "null"}, ], }, "user": { "description": "Associated user id", "type": ["string", "null"], }, }, "type": "object", }, "task_model_item": { "properties": { "model": {"description": "The model ID", "type": "string"}, "name": {"description": "The task model name", "type": "string"}, }, "required": ["name", "model"], "type": "object", }, "task_models": { "properties": { "input": { "description": "The list of task input models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, "output": { "description": "The list of task output models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, }, "type": "object", }, "task_status_enum": { "enum": [ "created", "queued", "in_progress", "stopped", "published", "publishing", "closed", "failed", "completed", "unknown", ], "type": "string", }, "task_type_enum": { "enum": [ "training", "testing", "inference", "data_processing", "application", "monitor", "controller", "optimizer", "service", "qc", "custom", ], "type": "string", }, }, "properties": { "tasks": { "description": "List of tasks", "items": {"$ref": "#/definitions/task"}, "type": ["array", "null"], } }, "type": "object", } def __init__(self, tasks: Optional[List[Any]] = None, **kwargs: Any) -> None: super(GetAllResponse, self).__init__(**kwargs) self.tasks = tasks @schema_property("tasks") def tasks(self) -> Optional[List[Any]]: return self._property_tasks @tasks.setter def tasks(self, value: Optional[List[Any]]) -> None: if value is None: self._property_tasks = None return self.assert_isinstance(value, "tasks", (list, tuple)) if any((isinstance(v, dict) 
for v in value)): value = [Task.from_dict(v) if isinstance(v, dict) else v for v in value] else: self.assert_isinstance(value, "tasks", Task, is_array=True) self._property_tasks = value
GetAllResponse
python
pytorch__pytorch
test/test_utils.py
{ "start": 22988, "end": 23147 }
class ____(TestCase): def test_smoke(self): info_output = get_pretty_env_info() self.assertTrue(info_output.count("\n") >= 17)
TestCollectEnv
python
run-llama__llama_index
llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/df.py
{ "start": 762, "end": 1498 }
class ____(BaseModel): """ Data-frame class. Consists of a `rows` field which is a list of dictionaries, as well as a `columns` field which is a list of column names. """ description: Optional[str] = None columns: List[DataFrameColumn] = Field(..., description="List of column names.") rows: List[DataFrameRow] = Field( ..., description="""List of DataFrameRow objects. Each DataFrameRow contains \ valuesin order of the data frame column.""", ) def to_df(self) -> pd.DataFrame: """To dataframe.""" return pd.DataFrame( [row.row_values for row in self.rows], columns=[col.column_name for col in self.columns], )
DataFrame
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/autoVariance3.py
{ "start": 662, "end": 972 }
class ____(Generic[T]): def __getitem__(self, index: int) -> T: ... def __iter__(self) -> Iterator[T]: ... vco1_1: ShouldBeCovariant1[float] = ShouldBeCovariant1[int]() # This should generate an error based on variance. vco1_2: ShouldBeCovariant1[int] = ShouldBeCovariant1[float]()
ShouldBeCovariant1
python
django-extensions__django-extensions
django_extensions/db/fields/__init__.py
{ "start": 15967, "end": 16818 }
class ____(CreationDateTimeField): """ ModificationDateTimeField By default, sets editable=False, blank=True, auto_now=True Sets value to now every time the object is saved. """ def __init__(self, *args, **kwargs): kwargs.setdefault("auto_now", True) DateTimeField.__init__(self, *args, **kwargs) def get_internal_type(self): return "DateTimeField" def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = True return name, path, args, kwargs def pre_save(self, model_instance, add): if not getattr(model_instance, "update_modified", True): return getattr(model_instance, self.attname) return super().pre_save(model_instance, add)
ModificationDateTimeField
python
donnemartin__system-design-primer
solutions/object_oriented_design/call_center/call_center.py
{ "start": 1253, "end": 1502 }
class ____(Employee): def __init__(self, employee_id, name): super(Operator, self).__init__(employee_id, name, Rank.SUPERVISOR) def escalate_call(self): self.call.level = Rank.DIRECTOR self._escalate_call()
Supervisor
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 7555, "end": 7976 }
class ____(models.Model): def custom_slug_one(self, content): return content.upper() def custom_slug_two(content): return content.lower() slugify_function = custom_slug_one title = models.CharField(max_length=42) slug = AutoSlugField(populate_from="title", slugify_function=custom_slug_two) class Meta: app_label = "django_extensions"
CustomFuncPrecedenceSluggedTestModel
python
kamyu104__LeetCode-Solutions
Python/maximum-score-words-formed-by-letters.py
{ "start": 56, "end": 1132 }
class ____(object): def maxScoreWords(self, words, letters, score): """ :type words: List[str] :type letters: List[str] :type score: List[int] :rtype: int """ def backtracking(words, word_scores, word_counts, curr, curr_score, letter_count, result): result[0] = max(result[0], curr_score) for i in xrange(curr, len(words)): if any(letter_count[c] < word_counts[i][c] for c in word_counts[i]): continue backtracking(words, word_scores, word_counts, i+1, curr_score+word_scores[i], letter_count-word_counts[i], result) letter_count = collections.Counter(letters) word_counts = map(collections.Counter, words) word_scores = [sum(score[ord(c)-ord('a')] for c in words[i]) for i in xrange(len(words))] result = [0] backtracking(words, word_scores, word_counts, 0, 0, letter_count, result) return result[0]
Solution
python
doocs__leetcode
solution/3600-3699/3660.Jump Game IX/Solution.py
{ "start": 0, "end": 426 }
class ____: def maxValue(self, nums: List[int]) -> List[int]: n = len(nums) ans = [0] * n pre_max = [nums[0]] * n for i in range(1, n): pre_max[i] = max(pre_max[i - 1], nums[i]) suf_min = inf for i in range(n - 1, -1, -1): ans[i] = ans[i + 1] if pre_max[i] > suf_min else pre_max[i] suf_min = min(suf_min, nums[i]) return ans
Solution
python
pypa__setuptools
setuptools/tests/test_scripts.py
{ "start": 34, "end": 379 }
class ____: def test_header(self): hdr = _scripts.WindowsScriptWriter.get_header('') assert hdr.startswith('#!') assert hdr.endswith('\n') hdr = hdr.lstrip('#!') hdr = hdr.rstrip('\n') # header should not start with an escaped quote assert not hdr.startswith('\\"')
TestWindowsScriptWriter
python
mlflow__mlflow
mlflow/genai/evaluation/constant.py
{ "start": 0, "end": 767 }
class ____: """ Expectation column names that are used by Agent Evaluation. Ref: https://docs.databricks.com/aws/en/generative-ai/agent-evaluation/evaluation-schema """ EXPECTED_RESPONSE = "expected_response" EXPECTED_RETRIEVED_CONTEXT = "expected_retrieved_context" EXPECTED_FACTS = "expected_facts" GUIDELINES = "guidelines" @classmethod def get_all(cls) -> set[str]: return { cls.EXPECTED_RESPONSE, cls.EXPECTED_RETRIEVED_CONTEXT, cls.EXPECTED_FACTS, cls.GUIDELINES, } # A column name for storing custom expectations dictionary in Agent Evaluation. AGENT_EVAL_CUSTOM_EXPECTATION_KEY = "custom_expected" # Input dataset column names
AgentEvaluationReserverKey
python
huggingface__transformers
src/transformers/models/mistral3/modular_mistral3.py
{ "start": 2785, "end": 4065 }
class ____(nn.Module): def __init__(self, config: Mistral3Config): super().__init__() self.norm = Mistral3RMSNorm(config.vision_config.hidden_size, eps=config.text_config.rms_norm_eps) self.patch_merger = Mistral3PatchMerger(config) # We have hidden_size * the number of vision feature layers num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) self.linear_1 = nn.Linear( config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear( config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias ) def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor): image_features = self.norm(image_features) image_features = self.patch_merger(image_features, image_sizes) hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states
Mistral3MultiModalProjector
python
getsentry__sentry
src/sentry/workflow_engine/models/workflow.py
{ "start": 1623, "end": 6370 }
class ____(DefaultFieldsModel, OwnerModel, JSONConfigBase): """ A workflow is a way to execute actions in a specified order. Workflows are initiated after detectors have been processed, driven by changes to their state. """ __relocation_scope__ = RelocationScope.Organization objects: ClassVar[WorkflowManager] = WorkflowManager() objects_for_deletion: ClassVar[BaseManager] = BaseManager() name = models.CharField(max_length=256) organization = FlexibleForeignKey("sentry.Organization") # If the workflow is not enabled, it will not be evaluated / invoke actions. This is how we "snooze" a workflow enabled = models.BooleanField(db_default=True) # The workflow's status - used for tracking deletion state status = models.SmallIntegerField(db_default=ObjectStatus.ACTIVE) # Required as the 'when' condition for the workflow, this evaluates states emitted from the detectors when_condition_group = FlexibleForeignKey( "workflow_engine.DataConditionGroup", null=True, blank=True, db_index=False ) environment = FlexibleForeignKey("sentry.Environment", null=True, blank=True) created_by_id = HybridCloudForeignKey( settings.AUTH_USER_MODEL, null=True, blank=True, on_delete="SET_NULL" ) DEFAULT_FREQUENCY = 30 config_schema = { "$schema": "https://json-schema.org/draft/2020-12/schema", "title": "Workflow Schema", "type": "object", "properties": { "frequency": { "description": "How often the workflow should fire for a Group (minutes)", "type": "integer", "minimum": 0, }, }, "additionalProperties": False, } __repr__ = sane_repr("organization_id") class Meta: app_label = "workflow_engine" db_table = "workflow_engine_workflow" constraints = [ models.UniqueConstraint( fields=["when_condition_group_id"], name="workflow_engine_workflow_when_condition_group_id_11d9ba05_uniq", ), ] def get_audit_log_data(self) -> dict[str, Any]: return {"name": self.name} def get_snapshot(self) -> WorkflowSnapshot: when_condition_group = None if self.when_condition_group: when_condition_group = 
self.when_condition_group.get_snapshot() environment_id = None if self.environment: environment_id = self.environment.id return { "id": self.id, "enabled": self.enabled, "environment_id": environment_id, "status": self.status, "triggers": when_condition_group, } def evaluate_trigger_conditions( self, event_data: WorkflowEventData, when_data_conditions: list[DataCondition] | None = None ) -> tuple[TriggerResult, list[DataCondition]]: """ Evaluate the conditions for the workflow trigger and return if the evaluation was successful. If there aren't any workflow trigger conditions, the workflow is considered triggered. """ # TODO - investigate circular import issue from sentry.workflow_engine.processors.data_condition_group import ( process_data_condition_group, ) if self.when_condition_group_id is None: return TriggerResult.TRUE, [] workflow_event_data = replace(event_data, workflow_env=self.environment) try: group = DataConditionGroup.objects.get_from_cache(id=self.when_condition_group_id) except DataConditionGroup.DoesNotExist: # This isn't expected under normal conditions, but weird things can happen in the # midst of deletions and migrations. logger.exception( "DataConditionGroup does not exist", extra={"id": self.when_condition_group_id}, ) return TriggerResult(False, ConditionError(msg="DataConditionGroup does not exist")), [] group_evaluation, remaining_conditions = process_data_condition_group( group, workflow_event_data, when_data_conditions ) return group_evaluation.logic_result, remaining_conditions def get_slow_conditions(workflow: Workflow) -> list[DataCondition]: if not workflow.when_condition_group: return [] slow_conditions = [ condition for condition in workflow.when_condition_group.conditions.all() if is_slow_condition(condition) ] return slow_conditions @receiver(pre_save, sender=Workflow) def enforce_config_schema(sender, instance: Workflow, **kwargs): instance.validate_config(instance.config_schema)
Workflow
python
getsentry__sentry
src/sentry/uptime/endpoints/organization_uptime_summary.py
{ "start": 1827, "end": 10422 }
class ____(OrganizationEndpoint):
    """Return aggregate uptime-check statistics per uptime detector.

    GET handler that queries the Events Analytics Platform (EAP) via the
    trace-item table RPC, grouping uptime check results by subscription id
    over the requested date range, and returns serialized ``UptimeSummary``
    stats keyed by the caller-supplied detector ids.
    """

    publish_status = {
        "GET": ApiPublishStatus.EXPERIMENTAL,
    }
    owner = ApiOwner.CRONS
    permission_classes = (OrganizationPermission,)

    def get(self, request: Request, organization: Organization) -> Response:
        """Handle GET: validate detector ids, query EAP, serialize summaries.

        Query params:
            uptimeDetectorId: one or more detector ids (required, capped at
                MAX_UPTIME_SUBSCRIPTION_IDS).
            start/end: date range (parsed by get_date_range_from_params).

        Returns 400 on missing/invalid/too-many ids or on RPC failure.
        """
        start, end = get_date_range_from_params(request.GET)
        projects = self.get_projects(request, organization, include_all_accessible=True)
        uptime_detector_ids = request.GET.getlist("uptimeDetectorId")
        if not uptime_detector_ids:
            return self.respond(
                "Uptime detector ids must be provided",
                status=400,
            )

        if len(uptime_detector_ids) > MAX_UPTIME_SUBSCRIPTION_IDS:
            return self.respond(
                f"Too many uptime detector ids provided. Maximum is {MAX_UPTIME_SUBSCRIPTION_IDS}",
                status=400,
            )

        # Resolve detector ids -> subscription ids, checking the caller is
        # authorized for each; ValueError signals an unknown/forbidden id.
        try:
            subscription_id_to_original_id, subscription_ids = (
                authorize_and_map_uptime_detector_subscription_ids(uptime_detector_ids, projects)
            )
        except ValueError:
            return self.respond("Invalid uptime detector ids provided", status=400)

        try:
            eap_response = self._make_eap_request(
                organization,
                projects,
                subscription_ids,
                start,
                end,
            )
            formatted_response = self._format_response(eap_response)
        except Exception:
            # Broad catch: any RPC/parsing failure is logged and surfaced as 400.
            logger.exception("Error making EAP RPC request for uptime check summary")
            return self.respond("error making request", status=400)

        # Map the response back to the original detector IDs
        mapped_response = self._map_response_to_original_ids(
            subscription_id_to_original_id, formatted_response
        )

        # Serialize the UptimeSummary objects
        serialized_response = {
            project_id: serialize(stats, request.user)
            for project_id, stats in mapped_response.items()
        }

        return self.respond(serialized_response)

    def _make_eap_request(
        self,
        organization: Organization,
        projects: list[Project],
        subscription_ids: list[str],
        start: datetime,
        end: datetime,
    ) -> TraceItemTableResponse:
        """Build and execute the EAP trace-item table RPC.

        Groups uptime results by ``subscription_id`` and computes, per
        subscription: total checks, failed checks (outside an incident),
        downtime checks (inside an incident), missed-window checks, and the
        average check duration in microseconds.
        """
        start_timestamp = Timestamp()
        start_timestamp.FromDatetime(start)
        end_timestamp = Timestamp()
        end_timestamp.FromDatetime(end)
        # Reused both as the group-by key and as the count target.
        subscription_attribute_key = AttributeKey(
            name="subscription_id",
            type=AttributeKey.Type.TYPE_STRING,
        )
        # Restrict rows to the requested subscriptions.
        query_filter = TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=subscription_attribute_key,
                op=ComparisonFilter.OP_IN,
                value=AttributeValue(val_str_array=StrArray(values=subscription_ids)),
            )
        )

        def failure_filter(incident_status: IncidentStatus) -> TraceItemFilter:
            # Failure check that is additionally scoped by incident status, so
            # "failed" (no incident) and "downtime" (in incident) can be told apart.
            status_filter = TraceItemFilter(
                comparison_filter=ComparisonFilter(
                    key=AttributeKey(name="check_status", type=AttributeKey.Type.TYPE_STRING),
                    op=ComparisonFilter.OP_EQUALS,
                    value=AttributeValue(val_str=CHECKSTATUS_FAILURE),
                )
            )
            incident_filter = TraceItemFilter(
                comparison_filter=ComparisonFilter(
                    key=AttributeKey(name="incident_status", type=AttributeKey.Type.TYPE_INT),
                    op=ComparisonFilter.OP_EQUALS,
                    value=AttributeValue(val_int=incident_status.value),
                )
            )
            return TraceItemFilter(and_filter=AndFilter(filters=[status_filter, incident_filter]))

        columns: list[Column] = [
            Column(label="uptime_subscription_id", key=subscription_attribute_key),
            Column(
                label="total_checks",
                aggregation=AttributeAggregation(
                    aggregate=Function.FUNCTION_COUNT,
                    key=subscription_attribute_key,
                    label="count()",
                ),
            ),
            Column(
                label="failed_checks",
                conditional_aggregation=AttributeConditionalAggregation(
                    aggregate=Function.FUNCTION_COUNT,
                    key=subscription_attribute_key,
                    filter=failure_filter(incident_status=IncidentStatus.NO_INCIDENT),
                ),
            ),
            Column(
                label="downtime_checks",
                conditional_aggregation=AttributeConditionalAggregation(
                    aggregate=Function.FUNCTION_COUNT,
                    key=subscription_attribute_key,
                    filter=failure_filter(incident_status=IncidentStatus.IN_INCIDENT),
                ),
            ),
            Column(
                label="missed_window_checks",
                conditional_aggregation=AttributeConditionalAggregation(
                    aggregate=Function.FUNCTION_COUNT,
                    key=subscription_attribute_key,
                    filter=TraceItemFilter(
                        comparison_filter=ComparisonFilter(
                            key=AttributeKey(
                                name="check_status", type=AttributeKey.Type.TYPE_STRING
                            ),
                            op=ComparisonFilter.OP_EQUALS,
                            value=AttributeValue(val_str=CHECKSTATUS_MISSED_WINDOW),
                        )
                    ),
                ),
            ),
            Column(
                label="avg_duration_us",
                aggregation=AttributeAggregation(
                    aggregate=Function.FUNCTION_AVG,
                    key=AttributeKey(name="check_duration_us", type=AttributeKey.Type.TYPE_INT),
                    label="avg(check_duration_us)",
                ),
            ),
        ]
        request = TraceItemTableRequest(
            meta=RequestMeta(
                organization_id=organization.id,
                project_ids=[project.id for project in projects],
                trace_item_type=TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT,
                start_timestamp=start_timestamp,
                end_timestamp=end_timestamp,
                downsampled_storage_config=DownsampledStorageConfig(
                    mode=DownsampledStorageConfig.MODE_HIGHEST_ACCURACY
                ),
            ),
            group_by=[subscription_attribute_key],
            filter=query_filter,
            columns=columns,
        )
        responses = table_rpc([request])
        # One request in, one response out; anything else is a protocol bug.
        assert len(responses) == 1
        return responses[0]

    def _format_response(self, response: TraceItemTableResponse) -> dict[str, UptimeSummary]:
        """
        Formats the response from the EAP RPC request into a dictionary
        mapping subscription ids to UptimeSummary.

        The RPC returns column-major data; this pivots it row by row. Counts
        arrive as doubles (val_double) and are truncated to ints.
        """
        column_values = response.column_values
        column_names = [cv.attribute_name for cv in column_values]
        formatted_data: dict[str, UptimeSummary] = {}
        if not column_values:
            return {}
        for row_idx in range(len(column_values[0].results)):
            # Re-assemble one row from the column-major layout.
            row_dict: dict[str, AttributeValue] = {
                col_name: column_values[col_idx].results[row_idx]
                for col_idx, col_name in enumerate(column_names)
            }
            summary_stats = UptimeSummary(
                total_checks=int(row_dict["total_checks"].val_double),
                failed_checks=int(row_dict["failed_checks"].val_double),
                downtime_checks=int(row_dict["downtime_checks"].val_double),
                missed_window_checks=int(row_dict["missed_window_checks"].val_double),
                avg_duration_us=row_dict["avg_duration_us"].val_double,
            )
            subscription_id = row_dict["uptime_subscription_id"].val_str
            formatted_data[subscription_id] = summary_stats
        return formatted_data

    def _map_response_to_original_ids(
        self,
        subscription_id_to_original_id: dict[str, int],
        formatted_response: dict[str, UptimeSummary],
    ) -> dict[int, UptimeSummary]:
        """
        Map the response back to the original detector IDs.

        Raises KeyError if a subscription id is missing from the mapping,
        which cannot happen for ids produced by the authorize step above.
        """
        return {
            subscription_id_to_original_id[subscription_id]: data
            for subscription_id, data in formatted_response.items()
        }
OrganizationUptimeSummaryEndpoint
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-flips-to-make-binary-grid-palindromic-ii.py
{ "start": 49, "end": 895 }
class ____(object): def minFlips(self, grid): """ :type grid: List[List[int]] :rtype: int """ m, n = len(grid), len(grid[0]) result = 0 for i in xrange(m//2): for j in xrange(n//2): c = grid[i][j]+grid[i][~j]+grid[~i][j]+grid[~i][~j] result += min(c, 4-c) diff = cnt1 = 0 if m%2: for j in xrange(n//2): diff += grid[m//2][j]^grid[m//2][~j] cnt1 += grid[m//2][j]+grid[m//2][~j] if n%2: for i in xrange(m//2): diff += grid[i][n//2]^grid[~i][n//2] cnt1 += grid[i][n//2]+grid[~i][n//2] if m%2 and n%2: result += grid[m//2][n//2] if diff == 0: result += (-cnt1)%4 return result+diff
Solution
python
patrick-kidger__equinox
equinox/nn/_pool.py
{ "start": 19094, "end": 19450 }
class ____(AdaptivePool):
    """Adaptive average pooling over three spatial dimensions.

    Downsamples the input to `target_shape` by averaging each adaptive window.
    """

    def __init__(self, target_shape: int | Sequence[int]):
        """**Arguments:**

        - `target_shape`: The target output shape.
        """
        # Specialise the generic adaptive pool: 3 spatial dims, mean reduction.
        super().__init__(
            target_shape,
            num_spatial_dims=3,
            operation=jnp.mean,
        )
AdaptiveAvgPool3d
python
openai__openai-python
src/openai/types/realtime/realtime_truncation_retention_ratio_param.py
{ "start": 252, "end": 687 }
class ____(TypedDict, total=False): post_instructions: int """ Maximum tokens allowed in the conversation after instructions (which including tool definitions). For example, setting this to 5,000 would mean that truncation would occur when the conversation exceeds 5,000 tokens after instructions. This cannot be higher than the model's context window size minus the maximum output tokens. """
TokenLimits
python
google__flatbuffers
python/flatbuffers/flexbuffers.py
{ "start": 9825, "end": 10257 }
class ____(Object):
    """Base class for all data accessors which need to read encoded size."""

    # Cache the element count so __len__ doesn't re-decode the size prefix.
    __slots__ = ('_size',)

    def __init__(self, buf, byte_width, size=0):
        super().__init__(buf, byte_width)
        if size == 0:
            # No explicit size given: decode it from the size prefix bytes.
            self._size = _Unpack(U, self.SizeBytes)
        else:
            self._size = size

    @property
    def SizeBytes(self):
        # NOTE(review): for a plain bytes/memoryview, a slice ending at 0 is
        # always empty — this presumably relies on the buffer convention
        # established by the Object base class (size prefix addressed via
        # negative indices); verify against Object before changing.
        return self._buf[-self._byte_width : 0]

    def __len__(self):
        # Number of elements, as cached at construction time.
        return self._size
Sized
python
readthedocs__readthedocs.org
readthedocs/proxito/views/serve.py
{ "start": 15584, "end": 15662 }
class ____(SettingsOverrideObject):
    """Serve documentation view.

    Thin proxy that (judging by ``SettingsOverrideObject``) allows swapping
    the implementation via a settings override — TODO confirm; falls back to
    ``ServeDocsBase`` otherwise.
    """

    # Implementation used when no override is configured.
    _default_class = ServeDocsBase
ServeDocs
python
apache__airflow
providers/apache/beam/tests/unit/apache/beam/operators/test_beam.py
{ "start": 2675, "end": 5179 }
class ____:
    """Tests for ``BeamBasePipelineOperator``: constructor behavior, the async
    ``execute_complete`` callback, and early Dataflow job-id XCom pushes."""

    @pytest.fixture(autouse=True)
    def setup_test_cases(self):
        # Minimal constructor kwargs shared by every test in this class.
        self.default_op_kwargs = {"task_id": TASK_ID, "runner": DEFAULT_RUNNER}

    def test_init(self, default_options, pipeline_options):
        operator = BeamBasePipelineOperator(
            **self.default_op_kwargs,
            default_pipeline_options=copy.deepcopy(default_options),
            pipeline_options=copy.deepcopy(pipeline_options),
            dataflow_config={},
        )
        # The constructor must not mutate these mappings — they may be listed
        # in templated_fields and rendered later.
        assert operator.default_pipeline_options == default_options
        assert operator.pipeline_options == pipeline_options
        assert operator.dataflow_config == {}

    def test_async_execute_should_throw_exception(self):
        """An ``error`` trigger event must surface as an AirflowException."""
        operator = BeamBasePipelineOperator(**self.default_op_kwargs)
        error_event = {"status": "error", "message": "test failure message"}
        with pytest.raises(AirflowException):
            operator.execute_complete(context=mock.MagicMock(), event=error_event)

    def test_async_execute_logging_should_execute_successfully(self, caplog):
        """A ``success`` trigger event is logged together with the task id."""
        operator = BeamBasePipelineOperator(**self.default_op_kwargs)
        success_event = {
            "status": "success",
            "message": "Pipeline has finished SUCCESSFULLY",
        }
        operator.execute_complete(context=mock.MagicMock(), event=success_event)
        assert f"{TASK_ID} completed with response Pipeline has finished SUCCESSFULLY" in caplog.text

    def test_early_dataflow_id_xcom_push(self, default_options, pipeline_options):
        operator = BeamBasePipelineOperator(
            **self.default_op_kwargs,
            default_pipeline_options=copy.deepcopy(default_options),
            pipeline_options=copy.deepcopy(pipeline_options),
            dataflow_config={},
        )
        ti_mock = MagicMock()
        operator._execute_context = {"ti": ti_mock}
        # Job id starts unset; the first assignment must push it to XCom.
        assert operator.dataflow_job_id is None
        job_id = "sample_df_job_id_value"
        operator.dataflow_job_id = job_id
        ti_mock.xcom_push.assert_called_once_with(key="dataflow_job_id", value=job_id)
        # A subsequent assignment must not trigger another push.
        ti_mock.xcom_push.reset_mock()
        operator.dataflow_job_id = "sample_df_job_same_value_id"
        ti_mock.xcom_push.assert_not_called()
TestBeamBasePipelineOperator
python
django__django
tests/delete_regress/models.py
{ "start": 2219, "end": 2332 }
class ____(models.Model):
    """Model with a nullable link to a Version.

    ``on_delete=SET_NULL`` together with ``null=True`` means deleting the
    referenced Version keeps this row and clears the reference.
    """

    version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)
Location
python
ApeWorX__ape
src/ape/cli/paramtype.py
{ "start": 397, "end": 848 }
class ____(click.ParamType):
    """Click parameter type that decodes a raw JSON string into a dict."""

    def convert(self, value, param, ctx):
        # Missing/empty input maps to an empty dictionary.
        if not value:
            return {}
        # Non-string values (e.g. a dict supplied as a default) pass through.
        if not isinstance(value, str):
            return value
        try:
            return json.loads(value)
        except ValueError as err:
            # fail() raises a click usage error, aborting option parsing.
            self.fail(f"Invalid JSON string: {err}", param, ctx)
JSON
python
huggingface__transformers
tests/models/blt/test_modeling_blt.py
{ "start": 1292, "end": 6375 }
class ____(CausalLMModelTester):
    """Model tester for BLT: builds tiny patcher/encoder/decoder/global
    sub-configs and a matching ``BltConfig`` for fast unit tests."""

    if is_torch_available():
        base_model_class = BltModel

    def __init__(
        self,
        parent,
        ignore_index=-100,
        seq_length=7,
        is_training=True,
    ):
        super().__init__(parent)
        self.parent = parent
        self.ignore_index = ignore_index
        self.seq_length = seq_length
        self.is_training = is_training
        self.batch_size = 3
        # Common parameters for all configs — deliberately tiny for test speed.
        self.hidden_size = 16
        self.num_hidden_layers = 1
        self.num_attention_heads = 2
        self.num_key_value_heads = 2
        self.intermediate_size = 32
        self.hidden_act = "silu"
        self.max_position_embeddings = 32
        self.vocab_size = 32
        self.rope_theta = 500000.0
        self.rope_parameters = {"rope_type": "default"}
        self.rms_norm_eps = 1e-5
        self.dropout = 0.0
        self.encoder_hash_byte_group_size = [2, 3]
        self.encoder_hash_byte_group_vocab = 64
        self.encoder_hash_byte_group_nb_functions = 1
        # Sub-configs built from the shared parameters above.
        self.patcher_config = {
            "hidden_size": self.hidden_size,
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": self.num_attention_heads,
            "num_key_value_heads": self.num_key_value_heads,
            "intermediate_size": self.intermediate_size,
            "max_position_embeddings": self.max_position_embeddings,
            "rope_theta": self.rope_theta,
            "rope_parameters": self.rope_parameters,
            "hidden_act": self.hidden_act,
            "rms_norm_eps": self.rms_norm_eps,
            "dropout": self.dropout,
        }
        self.encoder_config = {
            "hidden_size": self.hidden_size,
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": self.num_attention_heads,
            "num_key_value_heads": self.num_key_value_heads,
            "intermediate_size": self.intermediate_size,
            "max_position_embeddings": self.max_position_embeddings,
            "rope_theta": self.rope_theta,
            "rope_parameters": self.rope_parameters,
            "hidden_act": self.hidden_act,
            "rms_norm_eps": self.rms_norm_eps,
            "dropout": self.dropout,
        }
        self.decoder_config = {
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "hidden_size_global": self.hidden_size * 2,  # Must match global transformer output size
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": self.num_attention_heads,
            "num_key_value_heads": self.num_key_value_heads,
            "intermediate_size": self.intermediate_size,
            "max_position_embeddings": self.max_position_embeddings,
            "rope_theta": self.rope_theta,
            "rope_parameters": self.rope_parameters,
            "hidden_act": self.hidden_act,
            "rms_norm_eps": self.rms_norm_eps,
            "dropout": self.dropout,
        }
        self.global_config = {
            "hidden_size": self.hidden_size * 2,  # Double the hidden size for global transformer
            "num_hidden_layers": self.num_hidden_layers,
            "num_attention_heads": self.num_attention_heads,
            "num_key_value_heads": self.num_key_value_heads,
            "intermediate_size": self.intermediate_size,
            "max_position_embeddings": self.max_position_embeddings,
            "rope_theta": self.rope_theta,
            "rope_parameters": self.rope_parameters,
            "hidden_act": self.hidden_act,
            "rms_norm_eps": self.rms_norm_eps,
            "dropout": self.dropout,
        }
        # Re-read from the dict so later dict edits stay authoritative.
        self.num_hidden_layers = self.encoder_config["num_hidden_layers"]

    def get_config(self):
        """Assemble a BltConfig from the sub-configs built in __init__."""
        config = BltConfig(
            vocab_size=self.vocab_size,
            max_position_embeddings=self.max_position_embeddings,
            patch_in_forward=False,  # Disable patching for tests
            patch_size=4,
            patching_mode="entropy",
            patching_threshold=1.335442066192627,
            patching_batch_size=1,
            max_patch_length=None,
            cross_attn_k=2,
            encoder_hash_byte_group_size=self.encoder_hash_byte_group_size,
            encoder_hash_byte_group_vocab=self.encoder_hash_byte_group_vocab,
            encoder_hash_byte_group_nb_functions=self.encoder_hash_byte_group_nb_functions,
            patcher_config=self.patcher_config,
            encoder_config=self.encoder_config,
            decoder_config=self.decoder_config,
            global_config=self.global_config,
            rope_parameters=self.rope_parameters,
            tie_word_embeddings=False,
        )
        # Mirror sub-config values at the top level, where the shared test
        # harness reads them.
        config.num_attention_heads = config.decoder_config.num_attention_heads
        config.num_hidden_layers = config.encoder_config.num_hidden_layers
        config.hidden_size = config.decoder_config.hidden_size
        return config


@require_torch
BltModelTester
python
networkx__networkx
networkx/algorithms/tests/test_regular.py
{ "start": 1624, "end": 2437 }
class ____:
    """Tests for ``nx.is_regular`` / ``reg.is_regular``."""

    # Table of (graph, expected) pairs covering undirected, directed and
    # multi(di)graph inputs, both regular and irregular.
    @pytest.mark.parametrize(
        "graph,expected",
        [
            (nx.cycle_graph(4), True),
            (nx.complete_graph(5), True),
            (nx.path_graph(5), False),
            (nx.lollipop_graph(5, 5), False),
            (nx.cycle_graph(3, create_using=nx.DiGraph), True),
            (nx.Graph([(0, 1)]), True),
            (nx.DiGraph([(0, 1)]), False),
            (nx.MultiGraph([(0, 1), (0, 1)]), True),
            (nx.MultiDiGraph([(0, 1), (0, 1)]), False),
        ],
    )
    def test_is_regular(self, graph, expected):
        assert reg.is_regular(graph) == expected

    def test_is_regular_empty_graph_raises(self):
        # A graph without nodes has no degree sequence, so the question is
        # ill-posed and must raise rather than return a boolean.
        G = nx.Graph()
        with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes"):
            nx.is_regular(G)
TestIsRegular
python
tensorflow__tensorflow
tensorflow/python/distribute/step_fn.py
{ "start": 1369, "end": 1831 }
class ____(Step):
    """Step with a standard implementation of input handling.

    Args:
      dataset_fn: a function that returns a tf.data Dataset that produces the
        input for the model.
      distribution: the distribution strategy used to build the input
        iterator (and passed through to the ``Step`` base class).
    """

    def __init__(self, dataset_fn, distribution):
        super(StandardInputStep, self).__init__(distribution)
        # The input_fn receives an input context argument, which this lambda
        # ignores — every replica gets the same dataset_fn() pipeline.
        self._iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())

    def initialize(self):
        # Expose the iterator's initializer so callers can run it before use.
        return self._iterator.initializer
StandardInputStep