Dataset columns:
language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
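For reference, one record in this layout can be pictured as a plain Python dict. The sketch below is illustrative only and is built from the values of the first data row; the source field carries a class definition with its name masked as ____, and target carries the masked name.

# Illustrative sketch (not part of the dataset): the shape of one record,
# using values taken from the first row below. The source string is abridged.
example_record = {
    "language": "python",
    "repo": "keras-team__keras",
    "path": "keras/src/ops/numpy.py",
    "class_span": {"start": 194033, "end": 195505},
    "source": "class ____(Operation): def __init__(self, axis=None, *, name=None): ...",
    "target": "Take",
}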
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 194033, "end": 195505 }
class ____(Operation): def __init__(self, axis=None, *, name=None): super().__init__(name=name) self.axis = axis def call(self, x, indices): return backend.numpy.take(x, indices, axis=self.axis) def compute_output_spec(self, x, indices): x_shape = list(x.shape) if isinstance(indices, KerasTensor): indices_shape = list(indices.shape) ragged = indices.ragged else: indices_shape = list(getattr(np.array(indices), "shape", [])) ragged = False if self.axis is None: return KerasTensor(indices_shape, dtype=x.dtype) # make sure axis is non-negative axis = len(x_shape) + self.axis if self.axis < 0 else self.axis output_shape = x_shape[:axis] + indices_shape + x_shape[axis + 1 :] return KerasTensor(output_shape, dtype=x.dtype, ragged=ragged) @keras_export(["keras.ops.take", "keras.ops.numpy.take"]) def take(x, indices, axis=None): """Take elements from a tensor along an axis. Args: x: Source tensor. indices: The indices of the values to extract. axis: The axis over which to select values. By default, the flattened input tensor is used. Returns: The corresponding tensor of values. """ if any_symbolic_tensors((x, indices)): return Take(axis=axis).symbolic_call(x, indices) return backend.numpy.take(x, indices, axis=axis)
Take
python
PyCQA__pylint
tests/functional/u/unused/unused_private_member.py
{ "start": 6865, "end": 7484 }
class ____: __instance = None @classmethod # Use class method here def instance(cls): if cls.__instance is None: cls() return cls.__instance def __init__(self): try: FalsePositive4681b.__instance = 42 # This should be fine except Exception: # pylint: disable=broad-except print("Error") FalsePositive4681b.__instance = False # This should be fine # https://github.com/pylint-dev/pylint/issues/4849 # Accessing private static methods from classmethods via `cls` should not result in a # false positive
FalsePositive4681b
python
encode__django-rest-framework
tests/test_renderers.py
{ "start": 21344, "end": 22771 }
class ____(TestCase): """ Test rendering ChoiceField with HTMLFormRenderer. """ def setUp(self): choices = ((1, 'Option1'), (2, 'Option2'), (12, 'Option12')) class TestSerializer(serializers.Serializer): test_field = serializers.ChoiceField(choices=choices, initial=2) self.TestSerializer = TestSerializer self.renderer = HTMLFormRenderer() def test_render_initial_option(self): serializer = self.TestSerializer() result = self.renderer.render(serializer.data) self.assertIsInstance(result, SafeText) self.assertInHTML('<option value="2" selected>Option2</option>', result) self.assertInHTML('<option value="1">Option1</option>', result) self.assertInHTML('<option value="12">Option12</option>', result) def test_render_selected_option(self): serializer = self.TestSerializer(data={'test_field': '12'}) serializer.is_valid() result = self.renderer.render(serializer.data) self.assertIsInstance(result, SafeText) self.assertInHTML('<option value="12" selected>Option12</option>', result) self.assertInHTML('<option value="1">Option1</option>', result) self.assertInHTML('<option value="2">Option2</option>', result)
TestChoiceFieldHTMLFormRenderer
python
numpy__numpy
numpy/_core/tests/test_shape_base.py
{ "start": 1813, "end": 3034 }
class ____: def test_0D_array(self): a = array(1) b = array(2) res = [atleast_2d(a), atleast_2d(b)] desired = [array([[1]]), array([[2]])] assert_array_equal(res, desired) def test_1D_array(self): a = array([1, 2]) b = array([2, 3]) res = [atleast_2d(a), atleast_2d(b)] desired = [array([[1, 2]]), array([[2, 3]])] assert_array_equal(res, desired) def test_2D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) res = [atleast_2d(a), atleast_2d(b)] desired = [a, b] assert_array_equal(res, desired) def test_3D_array(self): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) a = array([a, a]) b = array([b, b]) res = [atleast_2d(a), atleast_2d(b)] desired = [a, b] assert_array_equal(res, desired) def test_r2array(self): """ Test to make sure equivalent Travis O's r2array function """ assert_(atleast_2d(3).shape == (1, 1)) assert_(atleast_2d([3j, 1]).shape == (1, 2)) assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
TestAtleast2d
python
kamyu104__LeetCode-Solutions
Python/cutting-ribbons.py
{ "start": 54, "end": 571 }
class ____(object): def maxLength(self, ribbons, k): """ :type ribbons: List[int] :type k: int :rtype: int """ def check(ribbons, k, s): return reduce(lambda total,x: total+x//s, ribbons, 0) >= k left, right = 1, sum(ribbons)//k while left <= right: mid = left + (right-left)//2 if not check(ribbons, k, mid): right = mid-1 else: left = mid+1 return right
Solution
python
spack__spack
lib/spack/spack/vendor/macholib/mach_o.py
{ "start": 31731, "end": 31938 }
class ____(Structure): _fields_ = (("offset", p_uint32), ("size", p_uint32)) def describe(self): s = {} s["offset"] = int(self.offset) s["size"] = int(self.size)
symseg_command
python
PyCQA__pylint
tests/functional/c/ctor_arguments.py
{ "start": 503, "end": 544 }
class ____(Class1Arg): pass
Subclass1Arg
python
kamyu104__LeetCode-Solutions
Python/leftmost-column-with-at-least-a-one.py
{ "start": 145, "end": 554 }
class ____(object): def leftMostColumnWithOne(self, binaryMatrix): """ :type binaryMatrix: BinaryMatrix :rtype: int """ m, n = binaryMatrix.dimensions() r, c = 0, n-1 while r < m and c >= 0: if not binaryMatrix.get(r, c): r += 1 else: c -= 1 return c+1 if c+1 != n else -1
Solution
python
huggingface__transformers
src/transformers/models/modernbert/modeling_modernbert.py
{ "start": 8143, "end": 9098 }
class ____(nn.Module): """Applies the GLU at the end of each ModernBERT layer. Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate` and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that has similar functionality. """ def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias) self.act = ACT2FN[config.hidden_activation] self.drop = nn.Dropout(config.mlp_dropout) self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input, gate = self.Wi(hidden_states).chunk(2, dim=-1) return self.Wo(self.drop(self.act(input) * gate))
ModernBertMLP
python
scrapy__scrapy
scrapy/core/downloader/handlers/http11.py
{ "start": 9752, "end": 11837 }
class ____(Agent): """An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS downloads. It may look strange that we have chosen to subclass Agent and not ProxyAgent but consider that after the tunnel is opened the proxy is transparent to the client; thus the agent should behave like there is no proxy involved. """ def __init__( self, *, reactor: ReactorBase, proxyConf: tuple[str, int, bytes | None], contextFactory: IPolicyForHTTPS, connectTimeout: float | None = None, bindAddress: bytes | None = None, pool: HTTPConnectionPool | None = None, ): super().__init__(reactor, contextFactory, connectTimeout, bindAddress, pool) self._proxyConf: tuple[str, int, bytes | None] = proxyConf self._contextFactory: IPolicyForHTTPS = contextFactory def _getEndpoint(self, uri: URI) -> TunnelingTCP4ClientEndpoint: return TunnelingTCP4ClientEndpoint( reactor=self._reactor, host=uri.host, port=uri.port, proxyConf=self._proxyConf, contextFactory=self._contextFactory, timeout=self._endpointFactory._connectTimeout, bindAddress=self._endpointFactory._bindAddress, ) def _requestWithEndpoint( self, key: Any, endpoint: TCP4ClientEndpoint, method: bytes, parsedURI: URI, headers: TxHeaders | None, bodyProducer: IBodyProducer | None, requestPath: bytes, ) -> Deferred[IResponse]: # proxy host and port are required for HTTP pool `key` # otherwise, same remote host connection request could reuse # a cached tunneled connection to a different proxy key += self._proxyConf return super()._requestWithEndpoint( key=key, endpoint=endpoint, method=method, parsedURI=parsedURI, headers=headers, bodyProducer=bodyProducer, requestPath=requestPath, )
TunnelingAgent
python
encode__httpx
httpx/_transports/default.py
{ "start": 8667, "end": 9161 }
class ____(AsyncByteStream): def __init__(self, httpcore_stream: typing.AsyncIterable[bytes]) -> None: self._httpcore_stream = httpcore_stream async def __aiter__(self) -> typing.AsyncIterator[bytes]: with map_httpcore_exceptions(): async for part in self._httpcore_stream: yield part async def aclose(self) -> None: if hasattr(self._httpcore_stream, "aclose"): await self._httpcore_stream.aclose()
AsyncResponseStream
python
jazzband__django-formtools
formtools/wizard/views.py
{ "start": 23404, "end": 23589 }
class ____(WizardView): """ A WizardView with pre-configured SessionStorage backend. """ storage_name = 'formtools.wizard.storage.session.SessionStorage'
SessionWizardView
python
scrapy__scrapy
tests/test_settings/__init__.py
{ "start": 15605, "end": 21777 }
class ____: def setup_method(self): self.settings = Settings() @mock.patch.dict("scrapy.settings.SETTINGS_PRIORITIES", {"default": 10}) @mock.patch("scrapy.settings.default_settings", default_settings) def test_initial_defaults(self): settings = Settings() assert len(settings.attributes) == 2 assert "TEST_DEFAULT" in settings.attributes attr = settings.attributes["TEST_DEFAULT"] assert isinstance(attr, SettingsAttribute) assert attr.value == "defvalue" assert attr.priority == 10 @mock.patch.dict("scrapy.settings.SETTINGS_PRIORITIES", {}) @mock.patch("scrapy.settings.default_settings", {}) def test_initial_values(self): settings = Settings({"TEST_OPTION": "value"}, 10) assert len(settings.attributes) == 1 assert "TEST_OPTION" in settings.attributes attr = settings.attributes["TEST_OPTION"] assert isinstance(attr, SettingsAttribute) assert attr.value == "value" assert attr.priority == 10 @mock.patch("scrapy.settings.default_settings", default_settings) def test_autopromote_dicts(self): settings = Settings() mydict = settings.get("TEST_DICT") assert isinstance(mydict, BaseSettings) assert "key" in mydict assert mydict["key"] == "val" assert mydict.getpriority("key") == 0 @mock.patch("scrapy.settings.default_settings", default_settings) def test_getdict_autodegrade_basesettings(self): settings = Settings() mydict = settings.getdict("TEST_DICT") assert isinstance(mydict, dict) assert len(mydict) == 1 assert "key" in mydict assert mydict["key"] == "val" def test_passing_objects_as_values(self): class TestPipeline: def process_item(self, i): return i settings = Settings( { "ITEM_PIPELINES": { TestPipeline: 800, }, "DOWNLOAD_HANDLERS": { "ftp": FileDownloadHandler, }, } ) assert "ITEM_PIPELINES" in settings.attributes mypipeline, priority = settings.getdict("ITEM_PIPELINES").popitem() assert priority == 800 assert mypipeline == TestPipeline assert isinstance(mypipeline(), TestPipeline) assert mypipeline().process_item("item") == "item" myhandler = settings.getdict("DOWNLOAD_HANDLERS").pop("ftp") assert myhandler == FileDownloadHandler myhandler_instance = build_from_crawler(myhandler, get_crawler()) assert isinstance(myhandler_instance, FileDownloadHandler) assert hasattr(myhandler_instance, "download_request") def test_pop_item_with_default_value(self): settings = Settings() with pytest.raises(KeyError): settings.pop("DUMMY_CONFIG") dummy_config_value = settings.pop("DUMMY_CONFIG", "dummy_value") assert dummy_config_value == "dummy_value" def test_pop_item_with_immutable_settings(self): settings = Settings( {"DUMMY_CONFIG": "dummy_value", "OTHER_DUMMY_CONFIG": "other_dummy_value"} ) assert settings.pop("DUMMY_CONFIG") == "dummy_value" settings.freeze() with pytest.raises( TypeError, match="Trying to modify an immutable Settings object" ): settings.pop("OTHER_DUMMY_CONFIG") @pytest.mark.parametrize( ("before", "name", "item", "after"), [ ({}, "FOO", "BAR", {"FOO": ["BAR"]}), ({"FOO": []}, "FOO", "BAR", {"FOO": ["BAR"]}), ({"FOO": ["BAR"]}, "FOO", "BAZ", {"FOO": ["BAR", "BAZ"]}), ({"FOO": ["BAR"]}, "FOO", "BAR", {"FOO": ["BAR"]}), ({"FOO": ""}, "FOO", "BAR", {"FOO": ["BAR"]}), ({"FOO": "BAR"}, "FOO", "BAR", {"FOO": "BAR"}), ({"FOO": "BAR"}, "FOO", "BAZ", {"FOO": ["BAR", "BAZ"]}), ({"FOO": "BAR,BAZ"}, "FOO", "BAZ", {"FOO": "BAR,BAZ"}), ({"FOO": "BAR,BAZ"}, "FOO", "QUX", {"FOO": ["BAR", "BAZ", "QUX"]}), ], ) def test_add_to_list(before, name, item, after): settings = BaseSettings(before, priority=0) settings.add_to_list(name, item) expected_priority = settings.getpriority(name) or 0 expected_settings 
= BaseSettings(after, priority=expected_priority) assert settings == expected_settings, ( f"{settings[name]=} != {expected_settings[name]=}" ) assert settings.getpriority(name) == expected_settings.getpriority(name) @pytest.mark.parametrize( ("before", "name", "item", "after"), [ ({}, "FOO", "BAR", ValueError), ({"FOO": ["BAR"]}, "FOO", "BAR", {"FOO": []}), ({"FOO": ["BAR"]}, "FOO", "BAZ", ValueError), ({"FOO": ["BAR", "BAZ"]}, "FOO", "BAR", {"FOO": ["BAZ"]}), ({"FOO": ""}, "FOO", "BAR", ValueError), ({"FOO": "[]"}, "FOO", "BAR", ValueError), ({"FOO": "BAR"}, "FOO", "BAR", {"FOO": []}), ({"FOO": "BAR"}, "FOO", "BAZ", ValueError), ({"FOO": "BAR,BAZ"}, "FOO", "BAR", {"FOO": ["BAZ"]}), ], ) def test_remove_from_list(before, name, item, after): settings = BaseSettings(before, priority=0) if isinstance(after, type) and issubclass(after, Exception): with pytest.raises(after): settings.remove_from_list(name, item) return settings.remove_from_list(name, item) expected_priority = settings.getpriority(name) or 0 expected_settings = BaseSettings(after, priority=expected_priority) assert settings == expected_settings, ( f"{settings[name]=} != {expected_settings[name]=}" ) assert settings.getpriority(name) == expected_settings.getpriority(name) def test_deprecated_concurrent_requests_per_ip_setting(): with warnings.catch_warnings(record=True) as warns: settings = Settings({"CONCURRENT_REQUESTS_PER_IP": 1}) settings.get("CONCURRENT_REQUESTS_PER_IP") assert ( str(warns[0].message) == "The CONCURRENT_REQUESTS_PER_IP setting is deprecated, use CONCURRENT_REQUESTS_PER_DOMAIN instead." )
TestSettings
python
miyuchina__mistletoe
mistletoe/span_token.py
{ "start": 2819, "end": 3068 }
class ____(SpanToken): """ Emphasis token. ("*some text*") This is an inline token. Its children are inline (span) tokens. One of the core tokens. """ def __init__(self, match): self.delimiter = match.delimiter
Emphasis
python
run-llama__llama_index
llama-index-core/llama_index/core/instrumentation/events/llm.py
{ "start": 777, "end": 1069 }
class ____(BaseEvent): """ LLMPredictEndEvent. The result of an llm.predict() call. Args: output (str): Output. """ output: str @classmethod def class_name(cls) -> str: """Class name.""" return "LLMPredictEndEvent"
LLMPredictEndEvent
python
getsentry__sentry
src/sentry/preprod/api/models/size_analysis/project_preprod_size_analysis_compare_models.py
{ "start": 261, "end": 688 }
class ____(BaseModel): head_size_metric_id: int base_size_metric_id: int | None metrics_artifact_type: PreprodArtifactSizeMetrics.MetricsArtifactType identifier: str | None state: PreprodArtifactSizeComparison.State # Only present when state is SUCCESS comparison_id: int | None # Only present when state is FAILED error_code: str | None error_message: str | None
SizeAnalysisComparison
python
langchain-ai__langchain
libs/partners/anthropic/langchain_anthropic/middleware/prompt_caching.py
{ "start": 783, "end": 5075 }
class ____(AgentMiddleware): """Prompt Caching Middleware. Optimizes API usage by caching conversation prefixes for Anthropic models. Requires both `langchain` and `langchain-anthropic` packages to be installed. Learn more about Anthropic prompt caching [here](https://platform.claude.com/docs/en/build-with-claude/prompt-caching). """ def __init__( self, type: Literal["ephemeral"] = "ephemeral", # noqa: A002 ttl: Literal["5m", "1h"] = "5m", min_messages_to_cache: int = 0, unsupported_model_behavior: Literal["ignore", "warn", "raise"] = "warn", ) -> None: """Initialize the middleware with cache control settings. Args: type: The type of cache to use, only `'ephemeral'` is supported. ttl: The time to live for the cache, only `'5m'` and `'1h'` are supported. min_messages_to_cache: The minimum number of messages until the cache is used. unsupported_model_behavior: The behavior to take when an unsupported model is used. `'ignore'` will ignore the unsupported model and continue without caching. `'warn'` will warn the user and continue without caching. `'raise'` will raise an error and stop the agent. """ self.type = type self.ttl = ttl self.min_messages_to_cache = min_messages_to_cache self.unsupported_model_behavior = unsupported_model_behavior def _should_apply_caching(self, request: ModelRequest) -> bool: """Check if caching should be applied to the request. Args: request: The model request to check. Returns: `True` if caching should be applied, `False` otherwise. Raises: ValueError: If model is unsupported and behavior is set to `'raise'`. """ if not isinstance(request.model, ChatAnthropic): msg = ( "AnthropicPromptCachingMiddleware caching middleware only supports " f"Anthropic models, not instances of {type(request.model)}" ) if self.unsupported_model_behavior == "raise": raise ValueError(msg) if self.unsupported_model_behavior == "warn": warn(msg, stacklevel=3) return False messages_count = ( len(request.messages) + 1 if request.system_message else len(request.messages) ) return messages_count >= self.min_messages_to_cache def wrap_model_call( self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse], ) -> ModelCallResult: """Modify the model request to add cache control blocks. Args: request: The model request to potentially modify. handler: The handler to execute the model request. Returns: The model response from the handler. """ if not self._should_apply_caching(request): return handler(request) model_settings = request.model_settings new_model_settings = { **model_settings, "cache_control": {"type": self.type, "ttl": self.ttl}, } return handler(request.override(model_settings=new_model_settings)) async def awrap_model_call( self, request: ModelRequest, handler: Callable[[ModelRequest], Awaitable[ModelResponse]], ) -> ModelCallResult: """Modify the model request to add cache control blocks (async version). Args: request: The model request to potentially modify. handler: The async handler to execute the model request. Returns: The model response from the handler. """ if not self._should_apply_caching(request): return await handler(request) model_settings = request.model_settings new_model_settings = { **model_settings, "cache_control": {"type": self.type, "ttl": self.ttl}, } return await handler(request.override(model_settings=new_model_settings))
AnthropicPromptCachingMiddleware
python
django__django
tests/cache/tests.py
{ "start": 72620, "end": 72897 }
class ____(FileBasedCacheTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir) @override_settings( CACHES={ "default": { "BACKEND": "cache.liberal_backend.CacheClass", }, } )
FileBasedCachePathLibTests
python
pytorch__pytorch
torch/export/unflatten.py
{ "start": 58135, "end": 59848 }
class ____: parent_fqn: str parent_module: torch.nn.Module parent_call_module: torch.fx.Node fqn: str call_idx: int module: torch.nn.Module def _outline_submodules(orig_graph: torch.fx.Graph, root_module: UnflattenedModule): seen_nodes: dict[str, torch.fx.Node] = {} seen_modules: dict[int, list[_SubmoduleEntry]] = defaultdict(list) seen_attrs: dict[str, set[str]] = defaultdict(set) created_modules: dict[str, torch.nn.Module] = {} _ModuleFrame( orig_graph, tuple(orig_graph.nodes), seen_nodes, seen_modules, seen_attrs, created_modules, None, [("", None, 0)], "", { entry.fqn: entry.signature for entry in root_module.module_call_graph if entry.signature }, module=root_module, ).run_outer() return seen_modules, seen_attrs def _reorder_submodules( parent: torch.nn.Module, fqn_order: dict[str, int], prefix: str = "" ): # TODO Can be optimized by adding submodules ahead of time. if prefix == "": for fqn in list(fqn_order.keys())[1:]: if _get_submodule(parent, fqn) is None: _add_submodule(parent, fqn, torch.nn.Module()) children = [] for name, child in list(parent._modules.items()): if child is None: continue fqn = prefix + name _reorder_submodules(child, fqn_order, prefix=fqn.split("@")[0] + ".") delattr(parent, name) children.append((fqn_order[fqn], name, child)) children.sort(key=operator.itemgetter(0)) for _, name, child in children: parent.register_module(name, child)
_SubmoduleEntry
python
airbytehq__airbyte
airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_authorizations.py
{ "start": 3583, "end": 9495 }
class ____(TestCase): @HttpMocker() def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(), _authorizations_response().with_record(_an_authorization()).with_record(_an_authorization()).build(), ) output = self._read(_config().with_start_date(_A_START_DATE)) assert len(output.records) == 2 @HttpMocker() def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(), _authorizations_response().with_pagination().with_record(_an_authorization().with_id("last_record_id_from_first_page")).build(), ) http_mocker.get( _authorizations_request() .with_starting_after("last_record_id_from_first_page") .with_created_gte(_A_START_DATE) .with_created_lte(_NOW) .with_limit(100) .build(), _authorizations_response().with_record(_an_authorization()).with_record(_an_authorization()).build(), ) output = self._read(_config().with_start_date(_A_START_DATE)) assert len(output.records) == 3 @HttpMocker() def test_given_no_state_when_read_then_return_ignore_lookback(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(), _authorizations_response().with_record(_an_authorization()).build(), ) self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10)) # request matched http_mocker @HttpMocker() def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(), _authorizations_response().with_record(_an_authorization()).build(), ) output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10)) assert output.records[0].record.data["updated"] == output.records[0].record.data["created"] @HttpMocker() def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None: start_date = _NOW - timedelta(days=30) slice_range = timedelta(days=20) slice_datetime = start_date + slice_range http_mocker.get( _authorizations_request() .with_created_gte(start_date) .with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES) .with_limit(100) .build(), _authorizations_response().build(), ) http_mocker.get( _authorizations_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(), _authorizations_response().build(), ) self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days)) # request matched http_mocker @HttpMocker() def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_any_query_params().build(), a_response_with_status(400), ) output = self._read(_config()) assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing") @HttpMocker() def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_any_query_params().build(), a_response_with_status(401), ) output = self._read(_config(), expecting_exception=True) assert output.errors[-1].trace.error.failure_type == FailureType.config_error @HttpMocker() def 
test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_any_query_params().build(), [ a_response_with_status(429), _authorizations_response().with_record(_an_authorization()).build(), ], ) output = self._read(_config().with_start_date(_A_START_DATE)) assert len(output.records) == 1 @HttpMocker() def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_any_query_params().build(), [a_response_with_status(500), _authorizations_response().with_record(_an_authorization()).build()], ) output = self._read(_config()) assert len(output.records) == 1 @HttpMocker() def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None: http_mocker.get( _authorizations_request().with_any_query_params().build(), a_response_with_status(500), ) with patch.object(HttpStatusErrorHandler, "max_retries", new=1): output = self._read(_config(), expecting_exception=True) assert output.errors[-1].trace.error.failure_type == FailureType.config_error def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput: return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception) @freezegun.freeze_time(_NOW.isoformat())
FullRefreshTest
python
apache__airflow
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
{ "start": 5399, "end": 5502 }
class ____(Exception): """Raised when package has no changes."""
PrepareReleaseDocsNoChangesException
python
pdm-project__pdm
src/pdm/cli/commands/init.py
{ "start": 696, "end": 12518 }
class ____(BaseCommand): """Initialize a pyproject.toml for PDM. Built-in templates: - default: `pdm init`, A simple template with a basic structure. - minimal: `pdm init minimal`, A minimal template with only `pyproject.toml`. """ supports_other_generator = True def __init__(self) -> None: self.interactive = True def initialize_git(self, project: Project) -> None: """Initialize a git repository if git is available and .git doesn't exist.""" import shutil import subprocess if (project.root / ".git").exists(): project.core.ui.info("Git repository already exists, skipping initialization.") return git_command = shutil.which("git") if not git_command: project.core.ui.info("Git command not found, skipping initialization.") return try: subprocess.run( [git_command, "init"], cwd=project.root, check=True, capture_output=True, encoding="utf-8", ) project.core.ui.info("Git repository initialized successfully.") except subprocess.CalledProcessError as e: project.core.ui.error(f"Failed to initialize Git repository: {e.stderr}") def do_init(self, project: Project, options: argparse.Namespace) -> None: """Bootstrap the project and create a pyproject.toml""" hooks = HookManager(project, options.skip) if options.generator == "copier": self._init_copier(project, options) elif options.generator == "cookiecutter": self._init_cookiecutter(project, options) else: self.set_python(project, options.python, hooks) self._init_builtin(project, options) if options.init_git: self.initialize_git(project) hooks.try_emit("post_init") def _init_copier(self, project: Project, options: argparse.Namespace) -> None: if not package_installed("copier"): raise PdmUsageError( "--copier is passed but copier is not installed. Install it by `pdm self add copier`" ) from None from copier.cli import CopierApp if not options.template: raise PdmUsageError("template argument is required when --copier is passed") _, retval = CopierApp.run( ["copier", "copy", options.template, str(project.root), *options.generator_args], exit=False ) if retval != 0: raise RuntimeError("Copier exited with non-zero status code") def _init_cookiecutter(self, project: Project, options: argparse.Namespace) -> None: if not package_installed("cookiecutter"): raise PdmUsageError( "--cookiecutter is passed but cookiecutter is not installed. Install it by `pdm self add cookiecutter`" ) from None from cookiecutter.cli import main as cookiecutter if not options.template: raise PdmUsageError("template argument is required when --cookiecutter is passed") if options.project_path: project.core.ui.warn( "Cookiecutter generator does not respect --project option. 
" "It will always create a project dir under the current directory", ) try: cookiecutter.main([options.template, *options.generator_args], standalone_mode=False) except SystemExit as e: raise RuntimeError("Cookiecutter exited with an error") from e def _init_builtin(self, project: Project, options: argparse.Namespace) -> None: metadata = self.get_metadata_from_input(project, options) template = options.template if not template: template = "default" if options.dist else "minimal" with ProjectTemplate(template) as template: template.generate(project.root, metadata, options.overwrite) project.pyproject.reload() def set_interactive(self, value: bool) -> None: self.interactive = value def ask(self, question: str, default: str) -> str: if not self.interactive: return default return termui.ask(question, default=default) def ask_project(self, project: Project) -> str: default = sanitize_project_name(project.root.name) name = self.ask("Project name", default) if default == name or validate_project_name(name): return name project.core.ui.echo( "Project name is not valid, it should follow PEP 426", err=True, style="warning", ) return self.ask_project(project) def get_metadata_from_input(self, project: Project, options: argparse.Namespace) -> dict[str, Any]: from pdm.formats.base import array_of_inline_tables, make_array, make_inline_table if options.name: if not validate_project_name(options.name): raise ProjectError("Project name is not valid, it should follow PEP 426") name = options.name else: name = self.ask_project(project) version = self.ask("Project version", options.project_version or "0.1.0") is_dist = options.dist or bool(options.backend) if not is_dist and self.interactive: is_dist = termui.confirm( "Do you want to build this project for distribution(such as wheel)?\n" "If yes, it will be installed by default when running `pdm install`." ) options.dist = is_dist build_backend: type[BuildBackend] | None = None python = project.python if is_dist: description = self.ask("Project description", "") if options.backend: build_backend = get_backend(options.backend) elif self.interactive: all_backends = list(_BACKENDS) project.core.ui.echo("Which build backend to use?") for i, backend in enumerate(all_backends): project.core.ui.echo(f"{i}. 
[success]{backend}[/]") selected_backend = termui.ask( "Please select", prompt_type=int, choices=[str(i) for i in range(len(all_backends))], show_choices=False, default=0, ) build_backend = get_backend(all_backends[int(selected_backend)]) else: build_backend = DEFAULT_BACKEND default_python_requires = f">={python.major}.{python.minor}" else: description = "" default_python_requires = f"=={python.major}.{python.minor}.*" license = self.ask("License(SPDX name)", options.license or "MIT") git_user, git_email = get_user_email_from_git() author = self.ask("Author name", git_user) email = self.ask("Author email", git_email) python_requires = self.ask("Python requires('*' to allow any)", default_python_requires) data = { "project": { "name": name, "version": version, "authors": array_of_inline_tables([{"name": author, "email": email}]), "license": make_inline_table({"text": license}), "dependencies": make_array([], True), }, "tool": {"pdm": {"distribution": is_dist}}, } if python_requires and python_requires != "*": get_specifier(python_requires) data["project"]["requires-python"] = python_requires # type: ignore[index] if description: data["project"]["description"] = description # type: ignore[index] if build_backend is not None: data["build-system"] = cast(dict, build_backend.build_system()) return data def add_arguments(self, parser: argparse.ArgumentParser) -> None: skip_option.add_to_parser(parser) status = { False: termui.style("\\[not installed]", style="error"), True: termui.style("\\[installed]", style="success"), } if self.supports_other_generator: generator = parser.add_mutually_exclusive_group() generator.add_argument( "--copier", action="store_const", dest="generator", const="copier", help=f"Use Copier to generate project {status[package_installed('copier')]}", ) generator.add_argument( "--cookiecutter", action="store_const", dest="generator", const="cookiecutter", help=f"Use Cookiecutter to generate project {status[package_installed('cookiecutter')]}", ) group = parser.add_argument_group("builtin generator options") group.add_argument( "-n", "--non-interactive", action="store_true", help="Don't ask questions but use default values", ) group.add_argument("--python", help="Specify the Python version/path to use") group.add_argument( "--dist", "--lib", dest="dist", action="store_true", help="Create a package for distribution" ) group.add_argument("--backend", choices=list(_BACKENDS), help="Specify the build backend, which implies --dist") group.add_argument("--license", help="Specify the license (SPDX name)") group.add_argument("--name", help="Specify the project name") group.add_argument("--project-version", help="Specify the project's version") group.add_argument( "--no-git", dest="init_git", action="store_false", default=True, help="Do not initialize a git repository" ) parser.add_argument( "template", nargs="?", help="Specify the project template, which can be a local path or a Git URL" ) if self.supports_other_generator: parser.add_argument("generator_args", nargs=argparse.REMAINDER, help="Arguments passed to the generator") parser.add_argument("-r", "--overwrite", action="store_true", help="Overwrite existing files") parser.set_defaults(search_parent=False, generator="builtin") def set_python(self, project: Project, python: str | None, hooks: HookManager) -> None: from pdm.cli.commands.use import Command as UseCommand python_info = UseCommand().do_use( project, python or "", first=bool(python) or not self.interactive, ignore_remembered=True, ignore_requires_python=True, 
save=False, hooks=hooks, ) if python_info.get_venv() is None: project.core.ui.info( "You are using the PEP 582 mode, no virtualenv is created.\n" "You can change configuration with `pdm config python.use_venv True`.\n" "For more info, please visit https://peps.python.org/pep-0582/" ) project.python = python_info def handle(self, project: Project, options: argparse.Namespace) -> None: if project.pyproject.exists(): project.core.ui.echo("pyproject.toml already exists, update it now.", style="primary") else: project.core.ui.echo("Creating a pyproject.toml for PDM...", style="primary") self.set_interactive(not options.non_interactive and termui.is_interactive()) self.do_init(project, options=options) project.core.ui.echo("Project is initialized successfully", style="primary") if self.interactive: actions.ask_for_import(project)
Command
python
pandas-dev__pandas
pandas/core/internals/managers.py
{ "start": 33247, "end": 66776 }
class ____(libinternals.BlockManager, BaseBlockManager): """ BaseBlockManager that holds 2D blocks. """ ndim = 2 # ---------------------------------------------------------------- # Constructors def __init__( self, blocks: Sequence[Block], axes: Sequence[Index], verify_integrity: bool = True, ) -> None: if verify_integrity: # Assertion disabled for performance # assert all(isinstance(x, Index) for x in axes) for block in blocks: if self.ndim != block.ndim: raise AssertionError( f"Number of Block dimensions ({block.ndim}) must equal " f"number of axes ({self.ndim})" ) # As of 2.0, the caller is responsible for ensuring that # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; # previously there was a special check for fastparquet compat. self._verify_integrity() def _verify_integrity(self) -> None: mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block.shape[1:] != mgr_shape[1:]: raise_construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError( "Number of manager items must equal union of " f"block items\n# manager items: {len(self.items)}, # " f"tot_items: {tot_items}" ) @classmethod def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: """ Constructor for BlockManager and SingleBlockManager with same signature. """ return cls(blocks, axes, verify_integrity=False) # ---------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleBlockManager: """ Return the array corresponding to `frame.iloc[loc]`. Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ if len(self.blocks) == 1: # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like; # is this ruled out in the general case? result: np.ndarray | ExtensionArray = self.blocks[0].iget( (slice(None), loc) ) # in the case of a single block, the new block is a view bp = BlockPlacement(slice(0, len(result))) block = new_block( result, placement=bp, ndim=1, refs=self.blocks[0].refs, ) return SingleBlockManager(block, self.axes[0]) dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) n = len(self) if isinstance(dtype, ExtensionDtype): # TODO: use object dtype as workaround for non-performant # EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__ # when iteratively setting individual values) # https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918 result = np.empty(n, dtype=object) else: result = np.empty(n, dtype=dtype) result = ensure_wrapped_if_datetimelike(result) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): item = blk.iget((i, loc)) if ( result.dtype.kind in "iub" and lib.is_float(item) and isna(item) and isinstance(blk.dtype, CategoricalDtype) ): # GH#58954 caused bc interleaved_dtype is wrong for Categorical # TODO(GH#38240) this will be unnecessary # Note that doing this in a try/except would work for the # integer case, but not for bool, which will cast the NaN # entry to True. 
if result.dtype.kind == "b": new_dtype = object else: new_dtype = np.float64 result = result.astype(new_dtype) result[rl] = item if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) bp = BlockPlacement(slice(0, len(result))) block = new_block(result, placement=bp, ndim=1) return SingleBlockManager(block, self.axes[0]) def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager: """ Return the data as a SingleBlockManager. """ block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) # shortcut for select a single-dim from a 2-dim BM bp = BlockPlacement(slice(0, len(values))) nb = type(block)( values, placement=bp, ndim=1, refs=block.refs if track_ref else None ) return SingleBlockManager(nb, self.axes[1]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution. """ # TODO(CoW) making the arrays read-only might make this safer to use? block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) return values @property def column_arrays(self) -> list[np.ndarray]: """ Used in the JSON C code to access column arrays. This optimizes compared to using `iget_values` by converting each Warning! This doesn't handle Copy-on-Write, so should be used with caution (current use case of consuming this in the JSON code is fine). """ # This is an optimized equivalent to # result = [self.iget_values(i) for i in range(len(self.items))] result: list[np.ndarray | None] = [None] * len(self.items) for blk in self.blocks: mgr_locs = blk._mgr_locs values = blk.array_values._values_for_json() if values.ndim == 1: # TODO(EA2D): special casing not needed with 2D EAs result[mgr_locs[0]] = values else: for i, loc in enumerate(mgr_locs): result[loc] = values[i] # error: Incompatible return value type (got "List[None]", # expected "List[ndarray[Any, Any]]") return result # type: ignore[return-value] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False, refs: BlockValuesRefs | None = None, ) -> None: """ Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: self._rebuild_blknos_and_blklocs() # Note: we exclude DTA/TDA here value_is_extension_type = is_1d_only_ea_dtype(value.dtype) if not value_is_extension_type: if value.ndim == 2: value = value.T else: value = ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: raise AssertionError( "Shape of new values must be compatible with manager shape" ) if lib.is_integer(loc): # We have 6 tests where loc is _not_ an int. # In this case, get_blkno_placements will yield only one tuple, # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) # Check if we can use _iset_single fastpath loc = cast(int, loc) blkno = self.blknos[loc] blk = self.blocks[blkno] if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? 
return self._iset_single( loc, value, inplace=inplace, blkno=blkno, blk=blk, refs=refs, ) # error: Incompatible types in assignment (expression has type # "List[Union[int, slice, ndarray]]", variable has type "Union[int, # slice, ndarray]") loc = [loc] # type: ignore[assignment] # categorical/sparse/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] # Accessing public blknos ensures the public versions are initialized blknos = self.blknos[loc] blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): blk = self.blocks[blkno_l] blk_locs = blklocs[val_locs.indexer] if inplace and blk.should_store(value): # Updating inplace -> check if we need to do Copy-on-Write if not self._has_no_reference_block(blkno_l): self._iset_split_block( blkno_l, blk_locs, value_getitem(val_locs), refs=refs ) else: blk.set_inplace(blk_locs, value_getitem(val_locs)) continue else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno_l) continue else: # Defer setting the new values to enable consolidation self._iset_split_block(blkno_l, blk_locs, refs=refs) if removed_blknos: # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] self.blocks = tuple( blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos) ) if unfit_val_locs: unfit_idxr = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] if value_is_extension_type: # This code (ab-)uses the fact that EA blocks contain only # one item. # TODO(EA2D): special casing unnecessary with 2D EAs new_blocks.extend( new_block_2d( values=value, placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), refs=refs, ) for mgr_loc in unfit_idxr ) self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) self._blklocs[unfit_idxr] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( new_block_2d( values=value_getitem(unfit_val_items), placement=BlockPlacement(unfit_idxr), refs=refs, ) ) self._blknos[unfit_idxr] = len(self.blocks) self._blklocs[unfit_idxr] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def _iset_split_block( self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None = None, refs: BlockValuesRefs | None = None, ) -> None: """Removes columns from a block by splitting the block. Avoids copying the whole block through slicing and updates the manager after determining the new block structure. Optionally adds a new block, otherwise has to be done by the caller. Parameters ---------- blkno_l: The block number to operate on, relevant for updating the manager blk_locs: The locations of our block that should be deleted. value: The value to set as a replacement. refs: The reference tracking object of the value to set. 
""" blk = self.blocks[blkno_l] if self._blklocs is None: self._rebuild_blknos_and_blklocs() nbs_tup = tuple(blk.delete(blk_locs)) if value is not None: locs = blk.mgr_locs.as_array[blk_locs] first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs) else: first_nb = nbs_tup[0] nbs_tup = tuple(nbs_tup[1:]) nr_blocks = len(self.blocks) blocks_tup = ( self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup ) self.blocks = blocks_tup if not nbs_tup and value is not None: # No need to update anything if split did not happen return self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) for i, nb in enumerate(nbs_tup): self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) self._blknos[nb.mgr_locs.indexer] = i + nr_blocks def _iset_single( self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block, refs: BlockValuesRefs | None = None, ) -> None: """ Fastpath for iset when we are only setting a single position and the Block currently in that position is itself single-column. In this case we can swap out the entire Block and blklocs and blknos are unaffected. """ # Caller is responsible for verifying value.shape if inplace and blk.should_store(value): copy = not self._has_no_reference_block(blkno) iloc = self.blklocs[loc] blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) return nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs) old_blocks = self.blocks new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :] self.blocks = new_blocks return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if not self._has_no_reference(loc): blkno = self.blknos[loc] # Split blocks to only copy the column we want to modify blk_loc = self.blklocs[loc] # Copy our values values = self.blocks[blkno].values if values.ndim == 1: values = values.copy() else: # Use [blk_loc] as indexer to keep ndim=2, this already results in a # copy values = values[[blk_loc]] self._iset_split_block(blkno, [blk_loc], values) # this manager is only created temporarily to mutate the values in place # so don't track references, otherwise the `setitem` would perform CoW again col_mgr = self.iget(loc, track_ref=False) if inplace_only: col_mgr.setitem_inplace(idx, value) else: new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray refs : The reference tracking object of the value to set. 
""" new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T if len(value) > 1: raise ValueError( f"Expected a 1D array, got an array with shape {value.T.shape}" ) else: value = ensure_block_shape(value, ndim=self.ndim) bp = BlockPlacement(slice(loc, loc + 1)) block = new_block_2d(values=value, placement=bp, refs=refs) if not len(self.blocks): # Fastpath self._blklocs = np.array([0], dtype=np.intp) self._blknos = np.array([0], dtype=np.intp) else: self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) self.axes[0] = new_axis self.blocks += (block,) self._known_consolidated = False if ( get_option("performance_warnings") and sum(not block.is_extension for block in self.blocks) > 100 ): warnings.warn( "DataFrame is highly fragmented. This is usually the result " "of calling `frame.insert` many times, which has poor performance. " "Consider joining all columns at once using pd.concat(axis=1) " "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: """ When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one. """ # Faster version of set(arr) for sequences of small numbers blknos = np.bincount(self.blknos[loc:]).nonzero()[0] for blkno in blknos: # .620 this way, .326 of which is in increment_above blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) def _insert_update_blklocs_and_blknos(self, loc) -> None: """ When inserting a new Block at location 'loc', we update our _blklocs and _blknos. """ # Accessing public blklocs ensures the public versions are initialized if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: # As of numpy 1.26.4, np.concatenate faster than np.append self._blklocs = np.concatenate([[0], self._blklocs]) self._blknos = np.concatenate([[len(self.blocks)], self._blknos]) else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) ) self._blklocs = new_blklocs self._blknos = new_blknos def idelete(self, indexer) -> BlockManager: """ Delete selected locations, returning a new BlockManager. """ is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True taker = (~is_deleted).nonzero()[0] nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] return type(self)(tuple(nbs), axes, verify_integrity=False) # ---------------------------------------------------------------- # Block-wise Operation def grouped_reduce(self, func: Callable) -> Self: """ Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager """ result_blocks: list[Block] = [] for blk in self.blocks: if blk.is_object: # split on object-dtype blocks bc some columns may raise # while others do not. 
for sb in blk._split(): applied = sb.apply(func) result_blocks = extend_blocks(applied, result_blocks) else: applied = blk.apply(func) result_blocks = extend_blocks(applied, result_blocks) if len(result_blocks) == 0: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] index = default_index(nrows) return type(self).from_blocks(result_blocks, [self.axes[0], index]) def reduce(self, func: Callable) -> Self: """ Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager """ # If 2D, we assume that we're operating column-wise assert self.ndim == 2 res_blocks = [blk.reduce(func) for blk in self.blocks] index = default_index(1) # placeholder new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ return operate_blockwise(self, other, array_op) def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ return blockwise_all(self, other, array_equals) def quantile( self, *, qs: Index, # with dtype float 64 interpolation: QuantileInterpolation = "linear", ) -> Self: """ Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- interpolation : type of interpolation, default 'linear' qs : list of the quantiles to be computed Returns ------- BlockManager """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 assert is_list_like(qs) # caller is responsible for this new_axes = list(self.axes) new_axes[1] = Index(qs, dtype=np.float64) blocks = [ blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks ] return type(self)(blocks, new_axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
Returns ------- unstacked : BlockManager """ new_columns = unstacker.get_new_columns(self.items) new_index = unstacker.new_index allow_fill = not unstacker.mask_all if allow_fill: # calculating the full mask once and passing it to Block._unstack is # faster than letting calculating it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) else: needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) new_blocks: list[Block] = [] columns_mask: list[np.ndarray] = [] if len(self.items) == 0: factor = 1 else: fac = len(new_columns) / len(self.items) assert fac == int(fac) factor = int(fac) for blk in self.blocks: mgr_locs = blk.mgr_locs new_placement = mgr_locs.tile_for_unstack(factor) blocks, mask = blk._unstack( unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking, ) new_blocks.extend(blocks) columns_mask.extend(mask) # Block._unstack should ensure this holds, assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks) # In turn this ensures that in the BlockManager call below # we have len(new_columns) == sum(x.shape[0] for x in new_blocks) # which suffices to allow us to pass verify_inegrity=False new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm def to_iter_dict(self) -> Generator[tuple[str, Self]]: """ Yield a tuple of (str(dtype), BlockManager) Returns ------- values : a tuple of (str(dtype), BlockManager) """ key = lambda block: str(block.dtype) for dtype, blocks in itertools.groupby(sorted(self.blocks, key=key), key=key): # TODO(EA2D): the combine will be unnecessary with 2D EAs yield dtype, self._combine(list(blocks)) def as_array( self, dtype: np.dtype | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into an numpy array. Parameters ---------- dtype : np.dtype or None, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. 
Returns ------- arr : ndarray """ passed_nan = lib.is_float(na_value) and isna(na_value) if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() if self.is_single_block: blk = self.blocks[0] if na_value is not lib.no_default: # We want to copy when na_value is provided to avoid # mutating the original object if lib.is_np_dtype(blk.dtype, "f") and passed_nan: # We are already numpy-float and na_value=np.nan pass else: copy = True if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, copy=copy, ).reshape(blk.shape) elif not copy: arr = np.asarray(blk.values, dtype=dtype) else: arr = np.array(blk.values, dtype=dtype, copy=copy) if passed_nan and blk.dtype.kind in "mM": arr[isna(blk.values)] = na_value if not copy: arr = arr.view() arr.flags.writeable = False else: arr = self._interleave(dtype=dtype, na_value=na_value) # The underlying data was copied within _interleave, so no need # to further copy if copy=True or setting na_value if na_value is lib.no_default: pass elif arr.dtype.kind == "f" and passed_nan: pass else: arr[isna(arr)] = na_value return arr.transpose() def _interleave( self, dtype: np.dtype | None = None, na_value: object = lib.no_default, ) -> np.ndarray: """ Return ndarray from blocks with specified item order Items must be contained in the blocks """ if not dtype: # Incompatible types in assignment (expression has type # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has # type "Optional[dtype[Any]]") dtype = interleaved_dtype( # type: ignore[assignment] [blk.dtype for blk in self.blocks] ) # error: Argument 1 to "ensure_np_dtype" has incompatible type # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]" dtype = ensure_np_dtype(dtype) # type: ignore[arg-type] result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) if dtype == np.dtype("object") and na_value is lib.no_default: # much more performant than using to_numpy below for blk in self.blocks: rl = blk.mgr_locs arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 return result for blk in self.blocks: rl = blk.mgr_locs if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, ) else: arr = blk.get_values(dtype) result[rl.indexer] = arr if na_value is not lib.no_default and blk.dtype.kind in "mM": result[rl.indexer][isna(arr)] = na_value itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError("Some items were not contained in blocks") return result # ---------------------------------------------------------------- # Consolidation def is_consolidated(self) -> bool: """ Return True if more than one block with the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self) -> None: if len(self.blocks) == 1: # fastpath self._is_consolidated = True self._known_consolidated = True return dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate] self._is_consolidated = len(dtypes) == len(set(dtypes)) self._known_consolidated = True def _consolidate_inplace(self) -> None: if not self.is_consolidated(): self.blocks = _consolidate(self.blocks) 
self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() # ---------------------------------------------------------------- # Concatenation @classmethod def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self: """ Concatenate uniformly-indexed BlockManagers horizontally. """ offset = 0 blocks: list[Block] = [] for mgr in mgrs: for blk in mgr.blocks: # We need to do getitem_block here otherwise we would be altering # blk.mgr_locs in place, which would render it invalid. This is only # relevant in the copy=False case. nb = blk.slice_block_columns(slice(None)) nb._mgr_locs = nb._mgr_locs.add(offset) blocks.append(nb) offset += len(mgr.items) new_mgr = cls(tuple(blocks), axes) return new_mgr @classmethod def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self: """ Concatenate uniformly-indexed BlockManagers vertically. """ raise NotImplementedError("This logic lives (for now) in internals.concat")
BlockManager
python
kamyu104__LeetCode-Solutions
Python/find-most-frequent-vowel-and-consonant.py
{ "start": 48, "end": 446 }
class ____(object):
    def maxFreqSum(self, s):
        """
        :type s: str
        :rtype: int
        """
        VOWELS = {'a', 'e', 'i', 'o', 'u'}
        cnt = [0]*26
        for x in s:
            cnt[ord(x)-ord('a')] += 1
        return max(cnt[i] for i in xrange(26) if chr(i+ord('a')) in VOWELS)+\
               max(cnt[i] for i in xrange(26) if chr(i+ord('a')) not in VOWELS)
Solution
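A minimal usage sketch for the record above (not part of the original dataset entry): it assumes the masked class resolves to Solution, as the target field indicates, and a Python 2 interpreter because the body uses xrange. maxFreqSum returns the sum of the highest vowel frequency and the highest consonant frequency in the string.

# vowels in "leetcode": e x3, o x1 -> 3; consonants: l, t, c, d each x1 -> 1
print(Solution().maxFreqSum("leetcode"))  # 4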
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/security.py
{ "start": 7291, "end": 7529 }
class ____(PermittedDagFilter):
    """A parameter that filters the permitted task instances for the user."""

    def to_orm(self, select: Select) -> Select:
        return select.where(TI.dag_id.in_(self.value or set()))
PermittedTIFilter
python
keon__algorithms
algorithms/graph/check_digraph_strongly_connected.py
{ "start": 291, "end": 2079 }
class ____:
    """
    A directed graph where edges are one-way (a two-way edge can be
    represented by using two edges).
    """

    def __init__(self,vertex_count):
        """
        Create a new graph with vertex_count vertices.
        """
        self.vertex_count = vertex_count
        self.graph = defaultdict(list)

    def add_edge(self,source,target):
        """
        Add an edge going from source to target
        """
        self.graph[source].append(target)

    def dfs(self):
        """
        Determine if all nodes are reachable from node 0
        """
        visited = [False] * self.vertex_count
        self.dfs_util(0,visited)
        if visited == [True]*self.vertex_count:
            return True
        return False

    def dfs_util(self,source,visited):
        """
        Determine if all nodes are reachable from the given node
        """
        visited[source] = True
        for adjacent in self.graph[source]:
            if not visited[adjacent]:
                self.dfs_util(adjacent,visited)

    def reverse_graph(self):
        """
        Create a new graph where every edge a->b is replaced with an edge b->a
        """
        reverse_graph = Graph(self.vertex_count)
        for source, adjacent in self.graph.items():
            for target in adjacent:
                # Note: we reverse the order of arguments
                # pylint: disable=arguments-out-of-order
                reverse_graph.add_edge(target,source)
        return reverse_graph

    def is_strongly_connected(self):
        """
        Determine if the graph is strongly connected.
        """
        if self.dfs():
            reversed_graph = self.reverse_graph()
            if reversed_graph.dfs():
                return True
        return False
Graph
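A short usage sketch for the Graph record above (not part of the original dataset entry); it assumes the class is importable as Graph and that `from collections import defaultdict` is in scope, which the class body relies on. The check runs DFS from vertex 0 on the graph and on its reversal.

g = Graph(3)
g.add_edge(0, 1)
g.add_edge(1, 2)
g.add_edge(2, 0)                  # cycle 0 -> 1 -> 2 -> 0
print(g.is_strongly_connected())  # True: every vertex can reach every other

h = Graph(3)
h.add_edge(0, 1)
h.add_edge(1, 2)                  # no edge back to 0
print(h.is_strongly_connected())  # False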
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 118685, "end": 119193 }
class ____(_PrintableStructure):
    _fields_ = [
        ('version', c_uint),
        ('str', c_char * NVML_PERF_MODES_BUFFER_SIZE),
    ]

nvmlDevicePerfModes_v1 = 0x1000804

@convertStrBytes
def nvmlDeviceGetPerformanceModes(handle):
    perfModes = c_nvmlDevicePerfModes_v1_t()
    perfModes.version = nvmlDevicePerfModes_v1
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetPerformanceModes")
    ret = fn(handle, byref(perfModes))
    _nvmlCheckReturn(ret)
    return perfModes.str
c_nvmlDevicePerfModes_v1_t
python
tensorflow__tensorflow
tensorflow/tools/proto_splitter/split.py
{ "start": 1934, "end": 9915 }
class ____(Splitter): """A Splitter that can be composed with other splitters. This Splitter writes to the riegeli file format. See README for details. """ def __init__( self, proto, *, proto_as_initial_chunk: bool = True, parent_splitter: Optional["ComposableSplitter"] = None, fields_in_parent: Optional[util.FieldTypes] = None, ): """Initializes ComposableSplitter. Args: proto: Proto message to split. proto_as_initial_chunk: Whether to initialize chunks with the user-provided proto as the initial chunk. parent_splitter: The parent `ComposableSplitter` object. fields_in_parent: Fields to access `proto` from the parent splitter's proto. """ self._proto = proto self._parent_splitter = parent_splitter self._fields_in_parent = fields_in_parent # Whether chunks have been created. See `build_chunks()`. self._built = False # Keep a list of chunk ids in the order in which they were added to the # list. self._add_chunk_order = [] self._fix_chunk_order = False # Initialize chunks and ChunkedMessage (optionally with the first chunk as # the user-provided proto. if parent_splitter is not None: # If this is not the root Splitter class, skip the initialization of # the chunks/message since the parent's will be updated instead. self._chunks = None self._chunked_message = None elif proto_as_initial_chunk: self._chunks = [self._proto] self._chunked_message = chunk_pb2.ChunkedMessage(chunk_index=0) self._add_chunk_order.append(id(self._proto)) else: self._chunks = [] self._chunked_message = chunk_pb2.ChunkedMessage() def build_chunks(self) -> None: """Builds the Splitter object by generating chunks from the proto. Subclasses of `ComposableChunks` should only need to override this method. This method should be called once per Splitter to create the chunks. Users should call the methods `split` or `write` instead. """ @property def version_def(self) -> versions_pb2.VersionDef: """Version info about the splitter and join implementation required.""" return versions_pb2.VersionDef( splitter_version=1, join_version=0, bad_consumers=version_lib.get_bad_versions(), ) def split( self, ) -> tuple[Sequence[Union[message.Message, bytes]], chunk_pb2.ChunkedMessage]: """Splits a proto message into a Sequence of protos/bytes.""" if self._parent_splitter: raise ValueError( "A child ComposableSplitter's `split` method should not be called " "directly, since it inherit chunks from a parent object. Please call " "the parent's `split()` method instead." ) assert self._chunks is not None assert self._chunked_message is not None if not self._built: self.build_chunks() self._fix_chunks() self._built = True return self._chunks, self._chunked_message def write( self, file_prefix: str, writer_options: Optional[str] = None ) -> str: """Serializes a proto to disk. The writer writes all chunks into a riegeli file. The chunk metadata (ChunkMetadata) is written at the very end. Args: file_prefix: string prefix of the filepath. The writer will automatically attach a `.pb` or `.cpb` (chunked pb) suffix depending on whether the proto is split. writer_options: Optional writer options to pass to the riegeli writer. See https://github.com/google/riegeli/blob/master/doc/record_writer_options.md for options. Returns: The actual filepath the proto is written to. The filepath will be different depending on whether the proto is split, i.e., whether it will be a pb or not. 
""" if self._parent_splitter is not None: raise ValueError( "A child ComposableSplitter's `write` method should not be called " "directly, since it inherits unrelated chunks from a parent object. " "Please call the parent's `write()` method instead." ) start_time = time.time() chunks, chunked_message = self.split() if not chunked_message.chunked_fields: path = f"{file_prefix}.pb" file_io.atomic_write_string_to_file( path, self._proto.SerializeToString(deterministic=True) ) logging.info("Unchunked file exported to %s", path) return path path = f"{file_prefix}.cpb" writer_kwargs = {} if writer_options is not None: writer_kwargs["options"] = writer_options with riegeli.RecordWriter(file_io.FileIO(path, "wb"), **writer_kwargs) as f: metadata = chunk_pb2.ChunkMetadata( message=chunked_message, version=self.version_def ) for chunk in chunks: if isinstance(chunk, message.Message): f.write_message(chunk) chunk_type = chunk_pb2.ChunkInfo.Type.MESSAGE size = chunk.ByteSize() else: f.write_record(chunk) chunk_type = chunk_pb2.ChunkInfo.Type.BYTES size = len(chunk) metadata.chunks.add( type=chunk_type, size=size, offset=f.last_pos.numeric ) f.write_message(metadata) end = time.time() logging.info("Chunked file exported to %s", path) logging.info( "Total time spent splitting and writing the message: %s", end - start_time, ) logging.info( "Number of chunks created (including initial message): %s", len(chunks), ) return path def add_chunk( self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index=None, ) -> None: """Adds a new chunk and updates the ChunkedMessage proto. Args: chunk: Proto message or bytes. field_tags: Field information about the placement of the chunked data within self._proto. index: Optional index at which to insert the chunk. The chunk ordering is important for merging. """ if self._parent_splitter is not None: self._parent_splitter.add_chunk( chunk, self._fields_in_parent + field_tags, index ) else: assert self._chunks is not None assert self._chunked_message is not None field = self._chunked_message.chunked_fields.add( field_tag=util.get_field_tag(self._proto, field_tags) ) new_chunk_index = len(self._chunks) field.message.chunk_index = new_chunk_index self._add_chunk_order.append(id(chunk)) if index is None: self._chunks.append(chunk) else: self._chunks.insert(index, chunk) self._fix_chunk_order = True def _fix_chunks(self) -> None: """Fixes chunk indices in the ChunkedMessage.""" if not self._fix_chunk_order: return # The chunk_index of each nested ChunkedMessage is set to the length of the # list when the chunk was added. This would be fine if the chunks were # always added to the end of the list. However, this is not always the case # the indices must be updated. # Use the address of each chunk (python `id`) as lookup keys to the # ordered chunk indices. chunk_indices = {id(chunk): i for i, chunk in enumerate(self._chunks)} to_fix = [self._chunked_message] while to_fix: for field in to_fix.pop().chunked_fields: if field.message.chunked_fields: to_fix.append(field.message) if not field.message.HasField("chunk_index"): continue chunk_addr = self._add_chunk_order[field.message.chunk_index] assert ( chunk_addr in chunk_indices ), f"Found unexpected chunk {chunk_addr}" new_chunk_index = chunk_indices[chunk_addr] field.message.chunk_index = new_chunk_index self._add_chunk_order = [id(chunk) for chunk in self._chunks] self._fix_chunk_order = False
ComposableSplitter
python
doocs__leetcode
solution/0300-0399/0307.Range Sum Query - Mutable/Solution2.py
{ "start": 108, "end": 1353 }
class ____:
    __slots__ = ["nums", "tr"]

    def __init__(self, nums):
        self.nums = nums
        n = len(nums)
        self.tr = [Node() for _ in range(n << 2)]
        self.build(1, 1, n)

    def build(self, u, l, r):
        self.tr[u].l, self.tr[u].r = l, r
        if l == r:
            self.tr[u].v = self.nums[l - 1]
            return
        mid = (l + r) >> 1
        self.build(u << 1, l, mid)
        self.build(u << 1 | 1, mid + 1, r)
        self.pushup(u)

    def modify(self, u, x, v):
        if self.tr[u].l == x and self.tr[u].r == x:
            self.tr[u].v = v
            return
        mid = (self.tr[u].l + self.tr[u].r) >> 1
        if x <= mid:
            self.modify(u << 1, x, v)
        else:
            self.modify(u << 1 | 1, x, v)
        self.pushup(u)

    def query(self, u, l, r):
        if self.tr[u].l >= l and self.tr[u].r <= r:
            return self.tr[u].v
        mid = (self.tr[u].l + self.tr[u].r) >> 1
        result = 0
        if l <= mid:
            result += self.query(u << 1, l, r)
        if r > mid:
            result += self.query(u << 1 | 1, l, r)
        return result

    def pushup(self, u):
        self.tr[u].v = self.tr[u << 1].v + self.tr[u << 1 | 1].v
SegmentTree
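A usage sketch for the SegmentTree record above (not part of the original dataset entry). The extracted span starts after a small Node helper defined in the same file, so a minimal stand-in with just the attributes the tree touches (l, r, v) is assumed here.

class Node:
    __slots__ = ["l", "r", "v"]
    def __init__(self):
        self.l = self.r = self.v = 0

tree = SegmentTree([1, 3, 5])
print(tree.query(1, 1, 3))   # 9: sum over the whole (1-indexed) range
tree.modify(1, 2, 2)         # point-update: set the 2nd element to 2
print(tree.query(1, 1, 3))   # 8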
python
matplotlib__matplotlib
lib/matplotlib/backend_tools.py
{ "start": 9661, "end": 10603 }
class ____(ToolBase):
    """
    Send message with the current pointer position.

    This tool runs in the background reporting the position of the cursor.
    """

    def __init__(self, *args, **kwargs):
        self._id_drag = None
        super().__init__(*args, **kwargs)

    def set_figure(self, figure):
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if figure:
            self._id_drag = self.canvas.mpl_connect(
                'motion_notify_event', self.send_message)

    def send_message(self, event):
        """Call `matplotlib.backend_managers.ToolManager.message_event`."""
        if self.toolmanager.messagelock.locked():
            return

        from matplotlib.backend_bases import NavigationToolbar2
        message = NavigationToolbar2._mouse_event_to_message(event)
        self.toolmanager.message_event(message, self)
ToolCursorPosition
python
Netflix__metaflow
test/unit/spin/flows/hello_spin_flow.py
{ "start": 52, "end": 569 }
class ____(FlowSpec):
    @step
    def start(self):
        chunk_size = 1024 * 1024  # 1 MB
        total_size = 1024 * 1024 * 1000  # 1000 MB
        data = bytearray()
        for _ in range(total_size // chunk_size):
            data.extend(random.randbytes(chunk_size))
        self.a = data
        self.next(self.end)

    @step
    def end(self):
        print(f"Size of artifact a: {len(self.a)} bytes")
        print("HelloSpinFlow completed.")


if __name__ == "__main__":
    HelloSpinFlow()
HelloSpinFlow
python
dask__dask
dask/dataframe/dask_expr/_groupby.py
{ "start": 31038, "end": 31140 }
class ____(GroupByBFill):
    func = staticmethod(functools.partial(_fillna, what="ffill"))
GroupByFFill
python
pytorch__pytorch
test/higher_order_ops/test_invoke_subgraph.py
{ "start": 96968, "end": 99235 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[8, 8]", L_y_: "f32[8, 8]"): l_x_ = L_x_ l_y_ = L_y_ subgraph_0 = self.subgraph_0 invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = None getitem: "f32[8, 8]" = invoke_subgraph[0] getitem_1: "f32[8, 8]" = invoke_subgraph[1]; invoke_subgraph = None subgraph_1 = self.subgraph_0 invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_0', getitem, l_y_); subgraph_1 = getitem = l_y_ = None getitem_2: "f32[8, 8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None add: "f32[8, 8]" = getitem_1 + getitem_2; getitem_1 = getitem_2 = None return (add,) class subgraph_0(torch.nn.Module): def forward(self, l_x_: "f32[8, 8]", l_y_: "f32[8, 8]"): a: "f32[8, 8]" = torch.sin(l_x_); l_x_ = None b: "f32[8, 8]" = torch.cos(l_y_); l_y_ = None return (a, b) """, ) # High priority - grads are wrong @unittest.expectedFailure def test_grad_accuracy_check(self): class Foo: def __init__(self, a, b): self.a = a self.b = b @nested_compile_region def gn(x): a = torch.sin(x) b = torch.cos(x) return (a, b) def fn(x): foo1 = gn(x) foo2 = gn(foo1[0]) return foo1[1] + foo2[0] + foo2[1] backend = AotEagerAndRecordGraphs() opt_fn = torch.compile(fn, backend=backend, fullgraph=True) x = torch.randn(8, 8, requires_grad=True) x_clone = x.detach().clone().requires_grad_(True) x.grad = None x_clone.grad = None ref = fn(x) res = opt_fn(x_clone) ref.sum().backward() res.sum().backward() self.assertEqual(ref, res) self.assertEqual(x.grad, x_clone.grad) @skipIfTorchDynamo("Not a torch._dynamo test") @parameterized_class( [ {"strict": False}, {"strict": True}, ], class_name_func=lambda cls, _, params: f"{cls.__name__}{'Strict' if params['strict'] else 'Nonstrict'}", )
GraphModule
python
pandas-dev__pandas
asv_bench/benchmarks/frame_methods.py
{ "start": 9951, "end": 10311 }
class ____:
    def setup(self):
        data = np.random.randn(1000, 500)
        df = DataFrame(data)
        df = df.where(df > 0)
        self.bools = df > 0
        self.mask = isnull(df)

    def time_frame_mask_bools(self):
        self.bools.mask(self.mask)

    def time_frame_mask_floats(self):
        self.bools.astype(float).mask(self.mask)
MaskBool
python
tensorflow__tensorflow
tensorflow/python/keras/saving/saved_model/layer_serialization.py
{ "start": 1257, "end": 5221 }
class ____(base_serialization.SavedModelSaver): """Implements Layer SavedModel serialization.""" @property def object_identifier(self): return constants.LAYER_IDENTIFIER @property def python_properties(self): # TODO(kathywu): Add python property validator return self._python_properties_internal() def _python_properties_internal(self): """Returns dictionary of all python properties.""" # TODO(kathywu): Add support for metrics serialization. # TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec) once # the python config serialization has caught up. metadata = dict( name=self.obj.name, trainable=self.obj.trainable, expects_training_arg=self.obj._expects_training_arg, # pylint: disable=protected-access dtype=policy.serialize(self.obj._dtype_policy), # pylint: disable=protected-access batch_input_shape=getattr(self.obj, '_batch_input_shape', None), stateful=self.obj.stateful, must_restore_from_config=self.obj._must_restore_from_config, # pylint: disable=protected-access ) metadata.update(get_serialized(self.obj)) if self.obj.input_spec is not None: # Layer's input_spec has already been type-checked in the property setter. metadata['input_spec'] = nest.map_structure( lambda x: generic_utils.serialize_keras_object(x) if x else None, self.obj.input_spec) if (self.obj.activity_regularizer is not None and hasattr(self.obj.activity_regularizer, 'get_config')): metadata['activity_regularizer'] = generic_utils.serialize_keras_object( self.obj.activity_regularizer) if self.obj._build_input_shape is not None: # pylint: disable=protected-access metadata['build_input_shape'] = self.obj._build_input_shape # pylint: disable=protected-access return metadata def objects_to_serialize(self, serialization_cache): return (self._get_serialized_attributes( serialization_cache).objects_to_serialize) def functions_to_serialize(self, serialization_cache): return (self._get_serialized_attributes( serialization_cache).functions_to_serialize) def _get_serialized_attributes(self, serialization_cache): """Generates or retrieves serialized attributes from cache.""" keras_cache = serialization_cache.setdefault(constants.KERAS_CACHE_KEY, {}) if self.obj in keras_cache: return keras_cache[self.obj] serialized_attr = keras_cache[self.obj] = ( serialized_attributes.SerializedAttributes.new(self.obj)) if (save_impl.should_skip_serialization(self.obj) or self.obj._must_restore_from_config): # pylint: disable=protected-access return serialized_attr object_dict, function_dict = self._get_serialized_attributes_internal( serialization_cache) serialized_attr.set_and_validate_objects(object_dict) serialized_attr.set_and_validate_functions(function_dict) return serialized_attr def _get_serialized_attributes_internal(self, serialization_cache): """Returns dictionary of serialized attributes.""" objects = save_impl.wrap_layer_objects(self.obj, serialization_cache) functions = save_impl.wrap_layer_functions(self.obj, serialization_cache) # Attribute validator requires that the default save signature is added to # function dict, even if the value is None. functions['_default_save_signature'] = None return objects, functions # TODO(kathywu): Move serialization utils (and related utils from # generic_utils.py) to a separate file. def get_serialized(obj): with generic_utils.skip_failed_serialization(): # Store the config dictionary, which may be used when reviving the object. # When loading, the program will attempt to revive the object from config, # and if that fails, the object will be revived from the SavedModel. 
return generic_utils.serialize_keras_object(obj)
LayerSavedModelSaver
python
allegroai__clearml
clearml/automation/trigger.py
{ "start": 7916, "end": 37332 }
class ____(BaseScheduler): """ Trigger Task execution if an event happens in the system. Examples: - New model is published/tagged, - New Dataset is created, - General Task failed, - Task metric below/above threshold, alert every X minutes """ _datasets_section = "datasets" _models_section = "models" _tasks_section = "tasks" _state_section = "state" def __init__( self, pooling_frequency_minutes: float = 3.0, sync_frequency_minutes: float = 15, force_create_task_name: Optional[str] = None, force_create_task_project: Optional[str] = None, ) -> None: """ Create a Task trigger service :param pooling_frequency_minutes: Check for new events every X minutes (default 3) :param sync_frequency_minutes: Sync task scheduler configuration every X minutes. Allow to change scheduler in runtime by editing the Task configuration object :param force_create_task_name: Optional, force creation of Task Scheduler service, even if main Task.init already exists. :param force_create_task_project: Optional, force creation of Task Scheduler service, even if main Task.init already exists. """ super(TriggerScheduler, self).__init__( sync_frequency_minutes=sync_frequency_minutes, force_create_task_name=force_create_task_name, force_create_task_project=force_create_task_project, pooling_frequency_minutes=pooling_frequency_minutes, ) self._task_triggers = [] self._dataset_triggers = [] self._model_triggers = [] self._executed_triggers = [] self._client = None def add_model_trigger( self, schedule_task_id: Union[str, Task] = None, schedule_queue: str = None, schedule_function: Callable[[str], None] = None, trigger_project: str = None, trigger_name: Optional[str] = None, trigger_on_publish: bool = None, trigger_on_tags: Optional[List[str]] = None, trigger_on_archive: bool = None, trigger_required_tags: Optional[List[str]] = None, name: Optional[str] = None, target_project: Optional[str] = None, add_tag: Union[bool, str] = True, single_instance: bool = False, reuse_task: bool = False, task_parameters: Optional[dict] = None, task_overrides: Optional[dict] = None, ) -> None: """ Create a cron job alike scheduling for a pre existing Task or function. Trigger the Task/function execution on changes in the model repository Notice it is recommended to give the trigger a descriptive unique name, if not provided a task ID is used. Notice `task_overrides` can except reference to the trigger model ID: example: ``task_overrides={'Args/model_id': '${model.id}'}`` Notice if schedule_function is passed, use the following function interface: .. code-block:: py def schedule_function(model_id): pass :param schedule_task_id: Task/task ID to be cloned and scheduled for execution :param schedule_queue: Queue name or ID to put the Task into (i.e. schedule) :param schedule_function: Optional, instead of providing Task ID to be scheduled, provide a function to be called. Notice the function is called from the scheduler context (i.e. running on the same machine as the scheduler) :param name: Name or description for the cron Task (should be unique if provided otherwise randomly generated) :param trigger_project: Only monitor models from this specific project (not recursive) :param trigger_name: Trigger only on models with name matching (regexp) :param trigger_on_publish: Trigger when model is published. 
:param trigger_on_tags: Trigger when all tags in the list are present :param trigger_on_archive: Trigger when model is archived :param trigger_required_tags: Trigger only on models with the following additional tags (must include all tags) :param target_project: Specify target project to put the cloned scheduled Task in. :param add_tag: Add tag to the executed Task. Provide specific tag (str) or pass True (default) to use the trigger name as tag :param single_instance: If True, do not launch the Task job if the previous instance is still running (skip until the next scheduled time period). Default False. :param reuse_task: If True, re-enqueue the same Task (i.e. do not clone it) every time, default False. :param task_parameters: Configuration parameters to the executed Task. for example: ``{'Args/batch': '12'}`` Notice: not available when reuse_task=True :param task_overrides: Change task definition. for example ``{'script.version_num': None, 'script.branch': 'main'}`` Notice: not available when reuse_task=True :return: True if job is successfully added to the scheduling list """ trigger = ModelTrigger( base_task_id=schedule_task_id, base_function=schedule_function, queue=schedule_queue, name=name, target_project=target_project, single_instance=single_instance, task_parameters=task_parameters, task_overrides=task_overrides, add_tag=(add_tag if isinstance(add_tag, str) else (name or schedule_task_id)) if add_tag else None, clone_task=not bool(reuse_task), match_name=trigger_name, project=Task.get_project_id(trigger_project) if trigger_project else None, tags=trigger_on_tags, required_tags=trigger_required_tags, on_publish=trigger_on_publish, on_archive=trigger_on_archive, ) trigger.verify() self._model_triggers.append(trigger) def add_dataset_trigger( self, schedule_task_id: Union[str, Task] = None, schedule_queue: str = None, schedule_function: Callable[[str], None] = None, trigger_project: str = None, trigger_name: Optional[str] = None, trigger_on_publish: bool = None, trigger_on_tags: Optional[List[str]] = None, trigger_on_archive: bool = None, trigger_required_tags: Optional[List[str]] = None, name: Optional[str] = None, target_project: Optional[str] = None, add_tag: Union[bool, str] = True, single_instance: bool = False, reuse_task: bool = False, task_parameters: Optional[dict] = None, task_overrides: Optional[dict] = None, ) -> None: """ Create a cron job alike scheduling for a pre existing Task or function. Trigger the Task/function execution on changes in the dataset repository (notice this is not the hyper-datasets). Notice, it is recommended to give the trigger a descriptive unique name. If not provided, a task ID is used. Notice `task_overrides` can except reference to the trigger model ID: example: ``task_overrides={'Args/dataset_id': '${dataset.id}'}``. Notice if schedule_function is passed, use the following function interface: .. code-block:: py def schedule_function(dataset_id): pass :param schedule_task_id: Task/task ID to be cloned and scheduled for execution :param schedule_queue: Queue name or ID to put the Task into (i.e. schedule) :param schedule_function: Optional, instead of providing Task ID to be scheduled, provide a function to be called. Notice the function is called from the scheduler context (i.e. 
running on the same machine as the scheduler) :param name: Name or description for the cron Task (should be unique if provided otherwise randomly generated) :param trigger_project: Only monitor datasets from this specific project (not recursive) :param trigger_name: Trigger only on datasets with name matching (regexp) :param trigger_on_publish: Trigger when dataset is published. :param trigger_on_tags: Trigger when all tags in the list are present :param trigger_on_archive: Trigger when dataset is archived :param trigger_required_tags: Trigger only on datasets with the following additional tags (must include all tags) :param target_project: Specify target project to put the cloned scheduled Task in. :param add_tag: Add tag to the executed Task. Provide specific tag (str) or pass True (default) to use the trigger name as tag :param single_instance: If True, do not launch the Task job if the previous instance is still running (skip until the next scheduled time period). Default False. :param reuse_task: If True, re-enqueue the same Task (i.e. do not clone it) every time, default False. :param task_parameters: Configuration parameters to the executed Task. For example: ``{'Args/batch': '12'}``. Notice: not available when reuse_task=True/ :param task_overrides: Change task definition. For example ``{'script.version_num': None, 'script.branch': 'main'}``. Notice: not available when reuse_task=True :return: True if job is successfully added to the scheduling list """ if trigger_project: trigger_project_list = Task.get_projects( name="^{}/\\.datasets/.*".format(trigger_project), search_hidden=True, _allow_extra_fields_=True, ) for project in trigger_project_list: trigger = DatasetTrigger( base_task_id=schedule_task_id, base_function=schedule_function, queue=schedule_queue, name=name, target_project=target_project, single_instance=single_instance, task_parameters=task_parameters, task_overrides=task_overrides, add_tag=(add_tag if isinstance(add_tag, str) else (name or schedule_task_id)) if add_tag else None, clone_task=not bool(reuse_task), match_name=trigger_name, project=project.id, tags=trigger_on_tags, required_tags=trigger_required_tags, on_publish=trigger_on_publish, on_archive=trigger_on_archive, ) trigger.verify() self._dataset_triggers.append(trigger) else: trigger = DatasetTrigger( base_task_id=schedule_task_id, base_function=schedule_function, queue=schedule_queue, name=name, target_project=target_project, single_instance=single_instance, task_parameters=task_parameters, task_overrides=task_overrides, add_tag=(add_tag if isinstance(add_tag, str) else (name or schedule_task_id)) if add_tag else None, clone_task=not bool(reuse_task), match_name=trigger_name, tags=trigger_on_tags, required_tags=trigger_required_tags, on_publish=trigger_on_publish, on_archive=trigger_on_archive, ) trigger.verify() self._dataset_triggers.append(trigger) def add_task_trigger( self, schedule_task_id: Union[str, Task] = None, schedule_queue: str = None, schedule_function: Callable[[str], None] = None, trigger_project: str = None, trigger_name: Optional[str] = None, trigger_on_tags: Optional[List[str]] = None, trigger_on_status: Optional[List[str]] = None, trigger_exclude_dev_tasks: Optional[bool] = None, trigger_on_metric: Optional[str] = None, trigger_on_variant: Optional[str] = None, trigger_on_threshold: Optional[float] = None, trigger_on_sign: Optional[str] = None, trigger_required_tags: Optional[List[str]] = None, name: Optional[str] = None, target_project: Optional[str] = None, add_tag: Union[bool, str] = 
True, single_instance: bool = False, reuse_task: bool = False, task_parameters: Optional[dict] = None, task_overrides: Optional[dict] = None, ) -> None: """ Create a cron job alike scheduling for a pre existing Task or function. Trigger the Task/function execution on changes in the Task Notice it is recommended to give the trigger a descriptive unique name, if not provided a task ID is used. Notice `task_overrides` can except reference to the trigger model ID: example: ``task_overrides={'Args/task_id': '${task.id}'}`` Notice if schedule_function is passed, use the following function interface: .. code-block:: py def schedule_function(task_id): pass :param schedule_task_id: Task/task ID to be cloned and scheduled for execution :param schedule_queue: Queue name or ID to put the Task into (i.e. schedule) :param schedule_function: Optional, instead of providing Task ID to be scheduled, provide a function to be called. Notice the function is called from the scheduler context (i.e. running on the same machine as the scheduler) :param name: Name or description for the cron Task (should be unique if provided otherwise randomly generated) :param trigger_project: Only monitor tasks from this specific project (not recursive) :param trigger_name: Trigger only on tasks with name matching (regexp) :param trigger_on_tags: Trigger when all tags in the list are present :param trigger_required_tags: Trigger only on tasks with the following additional tags (must include all tags) :param trigger_on_status: Trigger on Task status change. Expect list of status strings, e.g. ['failed', 'published']. TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed", "queued", "published", "publishing", "unknown"] :param trigger_exclude_dev_tasks: If True only trigger on Tasks executed by clearml-agent (and not manually) :param trigger_on_metric: Trigger on metric/variant above/under threshold (metric=title, variant=series) :param trigger_on_variant: Trigger on metric/variant above/under threshold (metric=title, variant=series) :param trigger_on_threshold: Trigger on metric/variant above/under threshold (float number) :param trigger_on_sign: possible values "max"/"maximum" or "min"/"minimum", trigger Task if metric below "min" or "above" maximum. Default: "minimum" :param target_project: Specify target project to put the cloned scheduled Task in. :param add_tag: Add tag to the executed Task. Provide specific tag (str) or pass True (default) to use the trigger name as tag :param single_instance: If True, do not launch the Task job if the previous instance is still running (skip until the next scheduled time period). Default False. :param reuse_task: If True, re-enqueue the same Task (i.e. do not clone it) every time, default False. :param task_parameters: Configuration parameters to the executed Task. for example: ``{'Args/batch': '12'}`` Notice: not available when reuse_task=True/ :param task_overrides: Change task definition. for example ``{'script.version_num': None, 'script.branch': 'main'}``. 
Notice: not available when reuse_task=True :return: True if job is successfully added to the scheduling list """ trigger = TaskTrigger( base_task_id=schedule_task_id, base_function=schedule_function, queue=schedule_queue, name=name, target_project=target_project, single_instance=single_instance, task_parameters=task_parameters, task_overrides=task_overrides, add_tag=(add_tag if isinstance(add_tag, str) else (name or schedule_task_id)) if add_tag else None, clone_task=not bool(reuse_task), match_name=trigger_name, project=Task.get_project_id(trigger_project) if trigger_project else None, tags=trigger_on_tags, required_tags=trigger_required_tags, on_status=trigger_on_status, exclude_dev=trigger_exclude_dev_tasks, metrics=trigger_on_metric, variant=trigger_on_variant, threshold=trigger_on_threshold, value_sign=trigger_on_sign, ) trigger.verify() self._task_triggers.append(trigger) def start(self) -> None: """ Start the Task trigger loop (notice this function does not return) """ super(TriggerScheduler, self).start() def get_triggers(self) -> List[BaseTrigger]: """ Return all triggers (models, datasets, tasks) :return: List of trigger objects """ return self._model_triggers + self._dataset_triggers + self._task_triggers def _step(self) -> bool: if not self._client: self._client = APIClient() executed = False for trigger in self._model_triggers + self._dataset_triggers + self._task_triggers: ref_time = datetime_from_isoformat(trigger.last_update or datetime.now(timezone.utc)) objects = [] try: # noinspection PyProtectedMember objects = getattr(self._client, trigger.get_key()).get_all( _allow_extra_fields_=True, only_fields=list(trigger._only_fields or []), **trigger.build_query(ref_time, self._client) ) trigger.last_update = max([trigger.get_ref_time(o) for o in objects] or [ref_time]) if not objects: continue except Exception as ex: self._log("Exception occurred while checking trigger '{}' state: {}".format(trigger, ex)) executed |= bool(objects) # actually handle trigger for obj in objects: # create a unique instance list if not trigger._triggered_instances: trigger._triggered_instances = {} if obj.id in trigger._triggered_instances: continue trigger._triggered_instances[obj.id] = datetime.now(timezone.utc) self._launch_job(trigger, obj.id) return executed # noinspection PyMethodOverriding def _launch_job(self, job: BaseTrigger, trigger_id: str) -> None: if job.base_task_id: task_parameters = None if job.task_parameters: task_parameters = { k: trigger_id if v == job._task_param else v for k, v in job.task_parameters.items() # noqa } task_job = self._launch_job_task( job, task_parameters=task_parameters, add_tags=job.add_tag or None, ) if task_job: self._executed_triggers.append( ExecutedTrigger( name=job.name, task_id=task_job.task_id(), started=datetime.now(timezone.utc), trigger=str(job.__class__.__name__), ) ) if job.base_function: thread_job = self._launch_job_function(job, func_args=(trigger_id,)) if thread_job: self._executed_triggers.append( ExecutedTrigger( name=job.name, thread_id=str(thread_job.ident), started=datetime.now(timezone.utc), trigger=str(job.__class__.__name__), ) ) def _serialize(self) -> None: # noinspection PyProtectedMember self._task._set_configuration( config_type="json", description="Dataset trigger configuration", config_text=json.dumps( [j.to_dict() for j in self._dataset_triggers], default=datetime_to_isoformat, ), name=self._datasets_section, ) # noinspection PyProtectedMember self._task._set_configuration( config_type="json", description="Model trigger 
configuration", config_text=json.dumps( [j.to_dict() for j in self._model_triggers], default=datetime_to_isoformat, ), name=self._models_section, ) # noinspection PyProtectedMember self._task._set_configuration( config_type="json", description="Task trigger configuration", config_text=json.dumps( [j.to_dict() for j in self._task_triggers], default=datetime_to_isoformat, ), name=self._tasks_section, ) def _deserialize(self) -> None: self._task.reload() self._dataset_triggers = self.__deserialize_section( section=self._datasets_section, trigger_class=DatasetTrigger, current_triggers=self._dataset_triggers, ) self._model_triggers = self.__deserialize_section( section=self._models_section, trigger_class=ModelTrigger, current_triggers=self._model_triggers, ) self._task_triggers = self.__deserialize_section( section=self._tasks_section, trigger_class=TaskTrigger, current_triggers=self._task_triggers, ) def __deserialize_section( self, section: str, trigger_class: BaseTrigger, current_triggers: List[BaseTrigger], ) -> List[BaseTrigger]: # noinspection PyProtectedMember json_str = self._task._get_configuration_text(name=section) try: return self.__deserialize_triggers(json.loads(json_str), trigger_class, current_triggers) except Exception as ex: self._log("Failed deserializing configuration: {}".format(ex), level=logging.WARN) return current_triggers @staticmethod def __deserialize_triggers( trigger_jobs: List[dict], trigger_class: BaseTrigger, current_triggers: List[BaseTrigger], ) -> List[BaseTrigger]: trigger_jobs = [trigger_class().update(j) for j in trigger_jobs] # noqa trigger_jobs = {j.name: j for j in trigger_jobs} current_triggers = {j.name: j for j in current_triggers} # select only valid jobs, and update the valid ones state from the current one new_triggers = [ current_triggers[name].update(j) if name in current_triggers else j for name, j in trigger_jobs.items() ] # verify all jobs for j in new_triggers: j.verify() return new_triggers def _serialize_state(self) -> None: json_str = json.dumps( dict( dataset_triggers=[j.to_dict(full=True) for j in self._dataset_triggers], model_triggers=[j.to_dict(full=True) for j in self._model_triggers], task_triggers=[j.to_dict(full=True) for j in self._task_triggers], # pooling_frequency_minutes=self._pooling_frequency_minutes, # sync_frequency_minutes=self._sync_frequency_minutes, ), default=datetime_to_isoformat, ) self._task.upload_artifact( name=self._state_section, artifact_object=json_str, preview="scheduler internal state", ) def _deserialize_state(self) -> None: # get artifact self._task.reload() artifact_object = self._task.artifacts.get(self._state_section) if artifact_object is None: return state_json_str = artifact_object.get() if state_json_str is None: return state_dict = json.loads(state_json_str) self._dataset_triggers = self.__deserialize_triggers( state_dict.get("dataset_triggers", []), trigger_class=DatasetTrigger, # noqa current_triggers=self._dataset_triggers, ) self._model_triggers = self.__deserialize_triggers( state_dict.get("model_triggers", []), trigger_class=ModelTrigger, # noqa current_triggers=self._model_triggers, ) self._task_triggers = self.__deserialize_triggers( state_dict.get("task_triggers", []), trigger_class=TaskTrigger, current_triggers=self._task_triggers, # noqa ) def _update_execution_plots(self) -> None: if not self._task: return task_link_template = ( self._task.get_output_log_web_page() .replace("/{}/".format(self._task.project), "/{project}/") .replace("/{}/".format(self._task.id), "/{task}/") ) # plot 
the already executed Tasks executed_table = [["trigger", "name", "task id", "started", "finished"]] for executed_job in sorted(self._executed_triggers, key=lambda x: x.started, reverse=True): if not executed_job.finished: if executed_job.task_id: t = Task.get_task(task_id=executed_job.task_id) if t.status not in ("in_progress", "queued"): executed_job.finished = t.data.completed or datetime.now(timezone.utc) elif executed_job.thread_id: # noinspection PyBroadException try: a_thread = [t for t in enumerate_threads() if t.ident == executed_job.thread_id] if not a_thread or not a_thread[0].is_alive(): executed_job.finished = datetime.now(timezone.utc) except Exception: pass executed_table += [ [ executed_job.trigger, executed_job.name, '<a href="{}">{}</a>'.format( task_link_template.format(project="*", task=executed_job.task_id), executed_job.task_id, ) if executed_job.task_id else "function", str(executed_job.started).split(".", 1)[0], str(executed_job.finished).split(".", 1)[0], ] ] # plot the schedule definition self._task.get_logger().report_table( title="Triggers Executed", series=" ", iteration=0, table_plot=executed_table, ) self.__report_trigger_table(triggers=self._model_triggers, title="Model Triggers") self.__report_trigger_table(triggers=self._dataset_triggers, title="Dataset Triggers") self.__report_trigger_table(triggers=self._task_triggers, title="Task Triggers") def __report_trigger_table(self, triggers: List[BaseTrigger], title: str) -> None: if not triggers: return task_link_template = ( self._task.get_output_log_web_page() .replace("/{}/".format(self._task.project), "/{project}/") .replace("/{}/".format(self._task.id), "/{task}/") ) columns = [k for k in BaseTrigger().__dict__.keys() if not k.startswith("_")] columns += [k for k in triggers[0].__dict__.keys() if k not in columns and not k.startswith("_")] column_task_id = columns.index("base_task_id") scheduler_table = [columns] for j in triggers: j_dict = j.to_dict() j_dict["base_function"] = ( "{}.{}".format( getattr(j.base_function, "__module__", ""), getattr(j.base_function, "__name__", ""), ) if j.base_function else "" ) if not j_dict.get("base_task_id"): j_dict["clone_task"] = "" row = [ str(j_dict.get(c)).split(".", 1)[0] if isinstance(j_dict.get(c), datetime) else str(j_dict.get(c) or "") for c in columns ] if row[column_task_id]: row[column_task_id] = '<a href="{}">{}</a>'.format( task_link_template.format(project="*", task=row[column_task_id]), row[column_task_id], ) scheduler_table += [row] self._task.get_logger().report_table(title=title, series=" ", iteration=0, table_plot=scheduler_table)
TriggerScheduler
python
getsentry__sentry
tests/sentry_plugins/trello/test_plugin.py
{ "start": 363, "end": 498 }
class ____(PluginTestCase):
    @cached_property
    def plugin(self) -> TrelloPlugin:
        return TrelloPlugin()
TrelloPluginTestBase
python
pandas-dev__pandas
pandas/io/html.py
{ "start": 16986, "end": 20467 }
class ____(_HtmlFrameParser):
    """
    HTML to DataFrame parser that uses BeautifulSoup under the hood.

    See Also
    --------
    pandas.io.html._HtmlFrameParser
    pandas.io.html._LxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`pandas.io.html._HtmlFrameParser`.
    """

    def _parse_tables(self, document, match, attrs):
        element_name = "table"
        tables = document.find_all(element_name, attrs=attrs)
        if not tables:
            raise ValueError("No tables found")

        result = []
        unique_tables = set()
        tables = self._handle_hidden_tables(tables, "attrs")

        for table in tables:
            if self.displayed_only:
                for elem in table.find_all("style"):
                    elem.decompose()

                for elem in table.find_all(style=re.compile(r"display:\s*none")):
                    elem.decompose()

            if table not in unique_tables and table.find(string=match) is not None:
                result.append(table)
            unique_tables.add(table)
        if not result:
            raise ValueError(f"No tables found matching pattern {match.pattern!r}")
        return result

    def _href_getter(self, obj) -> str | None:
        a = obj.find("a", href=True)
        return None if not a else a["href"]

    def _text_getter(self, obj):
        return obj.text

    def _equals_tag(self, obj, tag) -> bool:
        return obj.name == tag

    def _parse_td(self, row):
        return row.find_all(("td", "th"), recursive=False)

    def _parse_thead_tr(self, table):
        return table.select("thead tr")

    def _parse_tbody_tr(self, table):
        from_tbody = table.select("tbody tr")
        from_root = table.find_all("tr", recursive=False)
        # HTML spec: at most one of these lists has content
        return from_tbody + from_root

    def _parse_tfoot_tr(self, table):
        return table.select("tfoot tr")

    def _setup_build_doc(self):
        raw_text = _read(self.io, self.encoding, self.storage_options)
        if not raw_text:
            raise ValueError(f"No text parsed from document: {self.io}")
        return raw_text

    def _build_doc(self):
        from bs4 import BeautifulSoup

        bdoc = self._setup_build_doc()
        if isinstance(bdoc, bytes) and self.encoding is not None:
            udoc = bdoc.decode(self.encoding)
            from_encoding = None
        else:
            udoc = bdoc
            from_encoding = self.encoding

        soup = BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)

        for br in soup.find_all("br"):
            br.replace_with("\n" + br.text)

        return soup


def _build_xpath_expr(attrs) -> str:
    """
    Build an xpath expression to simulate bs4's ability to pass in kwargs to
    search for attributes when using the lxml parser.

    Parameters
    ----------
    attrs : dict
        A dict of HTML attributes. These are NOT checked for validity.

    Returns
    -------
    expr : unicode
        An XPath expression that checks for the given HTML attributes.
    """
    # give class attribute as class_ because class is a python keyword
    if "class_" in attrs:
        attrs["class"] = attrs.pop("class_")

    s = " and ".join([f"@{k}={v!r}" for k, v in attrs.items()])
    return f"[{s}]"


_re_namespace = {"re": "http://exslt.org/regular-expressions"}
_BeautifulSoupHtml5LibFrameParser
python
astropy__astropy
astropy/time/formats.py
{ "start": 66180, "end": 67831 }
class ____(TimeISO):
    """
    Year, day-of-year and time as "YYYY:DOY:HH:MM:SS.sss...".

    The day-of-year (DOY) goes from 001 to 365 (366 in leap years).
    For example, 2000:001:00:00:00.000 is midnight on January 1, 2000.

    The allowed subformats are:

    - 'date_hms': date + hours, mins, secs (and optional fractional secs)
    - 'date_hm': date + hours, mins
    - 'date': date
    """

    name = "yday"
    subfmts = (
        (
            "date_hms",
            "%Y:%j:%H:%M:%S",
            "{year:d}:{yday:03d}:{hour:02d}:{min:02d}:{sec:02d}",
        ),
        ("date_hm", "%Y:%j:%H:%M", "{year:d}:{yday:03d}:{hour:02d}:{min:02d}"),
        ("date", "%Y:%j", "{year:d}:{yday:03d}"),
    )

    # Define positions and starting delimiter for year, month, day, hour,
    # minute, seconds components of an ISO time. This is used by the fast
    # C-parser parse_ymdhms_times()
    #
    #  "2000:123:13:14:15.678"
    #   012345678901234567890
    #   yyyy:ddd:hh:mm:ss.fff
    # Parsed as ('yyyy', ':ddd', ':hh', ':mm', ':ss', '.fff')
    #
    # delims: character at corresponding `starts` position (0 => no character)
    # starts: position where component starts (including delimiter if present)
    # stops: position where component ends (-1 => continue to end of string)
    fast_parser_pars = dict(
        delims=(0, 0, ord(":"), ord(":"), ord(":"), ord(":"), ord(".")),
        starts=(0, -1, 4, 8, 11, 14, 17),
        stops=(3, -1, 7, 10, 13, 16, -1),
        # Break allowed before:
        #              y  m  d  h  m  s  f
        break_allowed=(0, 0, 0, 1, 0, 1, 1),
        has_day_of_year=1,
    )
TimeYearDayTime
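A brief usage sketch (not part of the original dataset entry): this subclass is what backs format="yday" on astropy Time objects, assuming a working astropy installation.

from astropy.time import Time

t = Time("2000:123:13:14:15.678", format="yday")
print(t.iso)    # 2000-05-02 13:14:15.678 (day 123 of the leap year 2000)
print(Time("2024-01-01 00:00:00").yday)  # 2024:001:00:00:00.000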
python
facebook__pyre-check
tools/incremental_test/environment.py
{ "start": 1590, "end": 2271 }
class ____(Environment):
    def run(
        self, working_directory: Path, command: str, stdin: Optional[str]
    ) -> CommandOutput:
        LOG.debug(
            f"Invoking subprocess `{command}` at `{working_directory}`"
            f"{' with stdin' if stdin is not None else ''}"
        )
        result = subprocess.run(
            command.split(),
            cwd=working_directory,
            universal_newlines=True,
            input=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return CommandOutput(
            return_code=result.returncode, stdout=result.stdout, stderr=result.stderr
        )
SubprocessEnvironment
python
tensorflow__tensorflow
tensorflow/python/training/supervisor.py
{ "start": 39343, "end": 40120 }
class ____(coordinator.LooperThread):
  """A thread to save summaries on a timer."""

  def __init__(self, sv, sess):
    """Create a SVSummaryThread.

    Args:
      sv: A `Supervisor`.
      sess: A `Session`.
    """
    super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
    self._sv = sv
    self._sess = sess

  def run_loop(self):
    if self._sv.global_step is not None:
      summary_strs, global_step = self._sess.run(
          [self._sv.summary_op, self._sv.global_step])
    else:
      summary_strs = self._sess.run(self._sv.summary_op)
      global_step = None
    if self._sv.summary_writer:
      logging.info("Recording summary at step %s.", global_step)
      self._sv.summary_writer.add_summary(summary_strs, global_step)
SVSummaryThread
python
numba__numba
numba/tests/gdb/test_pretty_print.py
{ "start": 350, "end": 2416 }
class ____(TestCase):

    def test(self):
        rdt_a = np.dtype([("x", np.int16), ("y", np.float64)], align=True)

        @njit(debug=True)
        def foo():
            a = 1.234
            b = (1, 2, 3)
            c = ('a', b, 4)
            d = np.arange(5.)
            e = np.array([[1, 3j], [2, 4j]])
            f = "Some string" + " L-Padded string".lstrip()
            g = 11 + 22j
            h = np.arange(24).reshape((4, 6))[::2, ::3]
            i = np.zeros(2, dtype=rdt_a)
            return a, b, c, d, e, f, g, h, i

        foo()

        extension = collect_gdbinfo().extension_loc

        driver = GdbMIDriver(__file__, init_cmds=['-x', extension], debug=False)
        driver.set_breakpoint(line=29)
        driver.run()
        driver.check_hit_breakpoint(1)

        # Ideally the function would be run to get the string repr of locals
        # but not everything appears in DWARF e.g. string literals. Further,
        # str on NumPy arrays seems to vary a bit in output. Therefore a custom
        # match is used.
        driver.stack_list_variables(1)
        output = driver._captured.after.decode('UTF-8')
        done_str = output.splitlines()[0]
        pat = r'^\^done,variables=\[\{(.*)\}\]$'
        lcls_strs = re.match(pat, done_str).groups()[0].split('},{')
        lcls = {k: v for k, v in
                [re.match(r'name="(.*)",value="(.*)"', x).groups()
                 for x in lcls_strs]}

        expected = dict()
        expected['a'] = r'1\.234'
        expected['b'] = r'\(1, 2, 3\)'
        expected['c'] = r'\(0x0, \(1, 2, 3\), 4\)'
        expected['d'] = r'\\n\[0. 1. 2. 3. 4.\]'
        expected['e'] = r'\\n\[\[1.\+0.j 0.\+3.j\]\\n \[2.\+0.j 0.\+4.j\]\]'
        expected['f'] = "'Some stringL-Padded string'"
        expected['g'] = r"11\+22j"
        expected['h'] = r'\\n\[\[ 0 3\]\\n \[12 15\]\]'
        expected['i'] = r'\\n\[\(0, 0.\) \(0, 0.\)\]'

        for k, v in expected.items():
            self.assertRegex(lcls[k], v)

        driver.quit()


if __name__ == '__main__':
    unittest.main()
Test
python
tornadoweb__tornado
tornado/web.py
{ "start": 100501, "end": 100933 }
class ____(HTTPError):
    """Exception raised by `RequestHandler.get_argument`.

    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    code will be used instead of 500 (and a stack trace will not be logged).

    .. versionadded:: 3.1
    """

    def __init__(self, arg_name: str) -> None:
        super().__init__(400, "Missing argument %s" % arg_name)
        self.arg_name = arg_name
MissingArgumentError
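A brief usage sketch (not part of the original dataset entry): inside a Tornado RequestHandler, get_argument with no default raises this error itself, which surfaces as an HTTP 400 rather than a 500.

import tornado.web

class EchoHandler(tornado.web.RequestHandler):
    def get(self):
        name = self.get_argument("name")  # raises MissingArgumentError -> 400 if absent
        self.write(name)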
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-fish-in-a-grid.py
{ "start": 43, "end": 1146 }
class ____(object):
    def findMaxFish(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
        def bfs(i, j):
            result = grid[i][j]
            grid[i][j] = 0
            q = [(i, j)]
            while q:
                new_q = []
                for i, j in q:
                    for di, dj in DIRECTIONS:
                        ni, nj = i+di, j+dj
                        if not (0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[ni][nj]):
                            continue
                        result += grid[ni][nj]
                        grid[ni][nj] = 0
                        new_q.append((ni, nj))
                q = new_q
            return result

        result = 0
        for i in xrange(len(grid)):
            for j in xrange(len(grid[0])):
                if grid[i][j]:
                    result = max(result, bfs(i, j))
        return result


# Time: O(m * n)
# Space: O(m * n)
# dfs
Solution
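A minimal usage sketch for the record above (not part of the original dataset entry); it assumes the masked class resolves to Solution and a Python 2 interpreter (xrange). Note the method zeroes out visited cells, so the grid is consumed.

grid = [[0, 2, 1, 0],
        [4, 0, 0, 3],
        [1, 0, 0, 4],
        [0, 3, 2, 0]]
print(Solution().findMaxFish(grid))  # 7: the adjacent cells holding 3 and 4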
python
python__mypy
mypy/nodes.py
{ "start": 19041, "end": 25034 }
class ____(FuncBase, SymbolNode, Statement): """A logical node representing all the variants of a multi-declaration function. A multi-declaration function is often an @overload, but can also be a @property with a setter and a/or a deleter. This node has no explicit representation in the source program. Overloaded variants must be consecutive in the source file. """ __slots__ = ( "items", "unanalyzed_items", "impl", "deprecated", "setter_index", "_is_trivial_self", ) items: list[OverloadPart] unanalyzed_items: list[OverloadPart] impl: OverloadPart | None deprecated: str | None setter_index: int | None def __init__(self, items: list[OverloadPart]) -> None: super().__init__() self.items = items self.unanalyzed_items = items.copy() self.impl = None self.deprecated = None self.setter_index = None self._is_trivial_self: bool | None = None if items: # TODO: figure out how to reliably set end position (we don't know the impl here). self.set_line(items[0].line, items[0].column) @property def name(self) -> str: if self.items: return self.items[0].name else: # This may happen for malformed overload assert self.impl is not None return self.impl.name @property def is_trivial_self(self) -> bool: """Check we can use bind_self() fast path for this overload. This will return False if at least one overload: * Has an explicit self annotation, or Self in signature. * Has a non-trivial decorator. """ if self._is_trivial_self is not None: return self._is_trivial_self for i, item in enumerate(self.items): # Note: bare @property is removed in visit_decorator(). trivial = 1 if i > 0 or not self.is_property else 0 if isinstance(item, FuncDef): if not item.is_trivial_self: self._is_trivial_self = False return False elif len(item.decorators) > trivial or not item.func.is_trivial_self: self._is_trivial_self = False return False self._is_trivial_self = True return True @property def setter(self) -> Decorator: # Do some consistency checks first. first_item = self.items[0] assert isinstance(first_item, Decorator) assert first_item.var.is_settable_property assert self.setter_index is not None item = self.items[self.setter_index] assert isinstance(item, Decorator) return item def accept(self, visitor: StatementVisitor[T]) -> T: return visitor.visit_overloaded_func_def(self) def serialize(self) -> JsonDict: return { ".class": "OverloadedFuncDef", "items": [i.serialize() for i in self.items], "type": None if self.type is None else self.type.serialize(), "fullname": self._fullname, "impl": None if self.impl is None else self.impl.serialize(), "flags": get_flags(self, FUNCBASE_FLAGS), "deprecated": self.deprecated, "setter_index": self.setter_index, } @classmethod def deserialize(cls, data: JsonDict) -> OverloadedFuncDef: assert data[".class"] == "OverloadedFuncDef" res = OverloadedFuncDef( [cast(OverloadPart, SymbolNode.deserialize(d)) for d in data["items"]] ) if data.get("impl") is not None: res.impl = cast(OverloadPart, SymbolNode.deserialize(data["impl"])) # set line for empty overload items, as not set in __init__ if len(res.items) > 0: res.set_line(res.impl.line) if data.get("type") is not None: typ = mypy.types.deserialize_type(data["type"]) assert isinstance(typ, mypy.types.ProperType) res.type = typ res._fullname = data["fullname"] set_flags(res, data["flags"]) res.deprecated = data["deprecated"] res.setter_index = data["setter_index"] # NOTE: res.info will be set in the fixup phase. 
return res def write(self, data: WriteBuffer) -> None: write_tag(data, OVERLOADED_FUNC_DEF) write_tag(data, LIST_GEN) write_int_bare(data, len(self.items)) for item in self.items: item.write(data) mypy.types.write_type_opt(data, self.type) write_str(data, self._fullname) if self.impl is None: write_tag(data, LITERAL_NONE) else: self.impl.write(data) write_flags(data, self, FUNCBASE_FLAGS) write_str_opt(data, self.deprecated) write_int_opt(data, self.setter_index) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> OverloadedFuncDef: assert read_tag(data) == LIST_GEN res = OverloadedFuncDef([read_overload_part(data) for _ in range(read_int_bare(data))]) typ = mypy.types.read_type_opt(data) if typ is not None: assert isinstance(typ, mypy.types.ProperType) res.type = typ res._fullname = read_str(data) tag = read_tag(data) if tag != LITERAL_NONE: res.impl = read_overload_part(data, tag) # set line for empty overload items, as not set in __init__ if len(res.items) > 0: res.set_line(res.impl.line) read_flags(data, res, FUNCBASE_FLAGS) res.deprecated = read_str_opt(data) res.setter_index = read_int_opt(data) # NOTE: res.info will be set in the fixup phase. assert read_tag(data) == END_TAG return res def is_dynamic(self) -> bool: return all(item.is_dynamic() for item in self.items)
OverloadedFuncDef
python
huggingface__transformers
src/transformers/models/sam/modeling_sam.py
{ "start": 43554, "end": 44261 }
class ____(PreTrainedModel):
    config: SamConfig
    base_model_prefix = "sam"
    main_input_name = "pixel_values"
    input_modalities = ("image",)
    _no_split_modules = ["SamVisionAttention"]
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    @torch.no_grad()
    def _init_weights(self, module: nn.Module):
        super()._init_weights(module)
        if isinstance(module, SamVisionAttention):
            if module.use_rel_pos:
                init.zeros_(module.rel_pos_h)
                init.zeros_(module.rel_pos_w)
        elif isinstance(module, SamVisionEncoder):
            if self.config.use_abs_pos:
                init.zeros_(module.pos_embed)
SamPreTrainedModel
python
django__django
django/templatetags/i18n.py
{ "start": 659, "end": 983 }
class ____(Node): def __init__(self, lang_code, variable): self.lang_code = lang_code self.variable = variable def render(self, context): lang_code = self.lang_code.resolve(context) context[self.variable] = translation.get_language_info(lang_code) return ""
GetLanguageInfoNode
python
django__django
tests/migrations/test_migrations_squashed_double/0002_auto.py
{ "start": 43, "end": 306 }
class ____(migrations.Migration): dependencies = [("migrations", "0001_initial")] operations = [ migrations.AlterField( model_name="a", name="foo", field=models.BooleanField(default=True), ), ]
Migration
python
walkccc__LeetCode
solutions/1732. Find the Highest Altitude/1732.py
{ "start": 0, "end": 195 }
class ____: def largestAltitude(self, gain: list[int]) -> int: ans = 0 currAltitude = 0 for g in gain: currAltitude += g ans = max(ans, currAltitude) return ans
Solution
python
Netflix__metaflow
metaflow/plugins/metadata_providers/local.py
{ "start": 486, "end": 23938 }
class ____(MetadataProvider): TYPE = "local" DATASTORE_DIR = DATASTORE_LOCAL_DIR # ".metaflow" @classmethod def _get_storage_class(cls): # This method is meant to be overridden from metaflow.plugins.datastores.local_storage import LocalStorage return LocalStorage def __init__(self, environment, flow, event_logger, monitor): super(LocalMetadataProvider, self).__init__( environment, flow, event_logger, monitor ) @classmethod def compute_info(cls, val): storage_class = cls._get_storage_class() v = os.path.realpath(os.path.join(val, cls.DATASTORE_DIR)) if os.path.isdir(v): storage_class.datastore_root = v return val raise ValueError( "Could not find directory %s in directory %s" % (cls.DATASTORE_DIR, val) ) @classmethod def default_info(cls): storage_class = cls._get_storage_class() def print_clean(line, **kwargs): print(line) v = storage_class.get_datastore_root_from_config( print_clean, create_on_absent=False ) if v is None: return "<No %s directory found in current working tree>" % cls.DATASTORE_DIR return os.path.dirname(v) def version(self): return "local" def new_run_id(self, tags=None, sys_tags=None): # We currently just use the timestamp to create an ID. We can be reasonably certain # that it is unique and this makes it possible to do without coordination or # reliance on POSIX locks in the filesystem. run_id = "%d" % (time.time() * 1e6) self._new_run(run_id, tags, sys_tags) return run_id def register_run_id(self, run_id, tags=None, sys_tags=None): try: # This metadata provider only generates integer IDs so if this is # an integer, we don't register it again (since it was "registered" # on creation). However, some IDs are created outside the metadata # provider and need to be properly registered int(run_id) return False except ValueError: return self._new_run(run_id, tags, sys_tags) def new_task_id(self, run_id, step_name, tags=None, sys_tags=None): self._task_id_seq += 1 task_id = str(self._task_id_seq) self._new_task(run_id, step_name, task_id, tags=tags, sys_tags=sys_tags) return task_id def register_task_id( self, run_id, step_name, task_id, attempt=0, tags=None, sys_tags=None ): try: # Same logic as register_run_id int(task_id) except ValueError: return self._new_task( run_id, step_name, task_id, attempt=attempt, tags=tags, sys_tags=sys_tags, ) else: self._register_system_metadata(run_id, step_name, task_id, attempt) return False def register_data_artifacts( self, run_id, step_name, task_id, attempt_id, artifacts ): meta_dir = self.__class__._create_and_get_metadir( self._flow_name, run_id, step_name, task_id ) artlist = self._artifacts_to_json( run_id, step_name, task_id, attempt_id, artifacts ) artdict = {"%d_artifact_%s" % (attempt_id, art["name"]): art for art in artlist} self._save_meta(meta_dir, artdict) def register_metadata(self, run_id, step_name, task_id, metadata): meta_dir = self.__class__._create_and_get_metadir( self._flow_name, run_id, step_name, task_id ) metalist = self._metadata_to_json(run_id, step_name, task_id, metadata) ts = int(round(time.time() * 1000)) metadict = { "sysmeta_%s_%d" % (meta["field_name"], ts): meta for meta in metalist } self._save_meta(meta_dir, metadict) @classmethod def _mutate_user_tags_for_run( cls, flow_id, run_id, tags_to_add=None, tags_to_remove=None ): MutationResult = namedtuple( "MutationResult", field_names="tags_are_consistent tags" ) def _optimistically_mutate(): # get existing tags run = cls.get_object("run", "self", {}, None, flow_id, run_id) if not run: raise MetaflowTaggingError( msg="Run not found (%s, %s)" % (flow_id, 
run_id) ) existing_user_tag_set = frozenset(run["tags"]) existing_system_tag_set = frozenset(run["system_tags"]) tags_to_remove_set = frozenset(tags_to_remove) # make sure no existing system tags get added as a user tag tags_to_add_set = frozenset(tags_to_add) - existing_system_tag_set # from this point on we work with sets of tags only if tags_to_remove_set & existing_system_tag_set: raise MetaflowTaggingError( msg="Cannot remove a tag that is an existing system tag (%s)" % str(sorted(tags_to_remove_set & existing_system_tag_set)) ) # remove tags first, then add next_user_tags_set = ( existing_user_tag_set - tags_to_remove_set ) | tags_to_add_set # we think it will be a no-op, so let's return right away if next_user_tags_set == existing_user_tag_set: return MutationResult( tags=next_user_tags_set, tags_are_consistent=True, ) validate_tags(next_user_tags_set, existing_tags=existing_user_tag_set) # write new tag set to file system cls._persist_tags_for_run( flow_id, run_id, next_user_tags_set, existing_system_tag_set ) # read tags back from file system to see if our optimism is misplaced # I.e. did a concurrent mutate overwrite our change run = cls.get_object("run", "self", {}, None, flow_id, run_id) if not run: raise MetaflowTaggingError( msg="Run not found for read-back check (%s, %s)" % (flow_id, run_id) ) final_tag_set = frozenset(run["tags"]) if tags_to_add_set - final_tag_set: return MutationResult(tags=final_tag_set, tags_are_consistent=False) if ( tags_to_remove_set & final_tag_set ) - tags_to_add_set: # Remove before add, remember? Account for this return MutationResult(tags=final_tag_set, tags_are_consistent=False) return MutationResult(tags=final_tag_set, tags_are_consistent=True) tries = 1 # try up to 5 times, with a gentle exponential backoff (1.1-1.3x) while True: mutation_result = _optimistically_mutate() if mutation_result.tags_are_consistent: return mutation_result.tags if tries >= 5: break time.sleep(0.3 * random.uniform(1.1, 1.3) ** tries) tries += 1 raise MetaflowTaggingError( "Tagging failed due to too many conflicting updates from other processes" ) @classmethod def filter_tasks_by_metadata( cls, flow_name: str, run_id: str, step_name: str, field_name: str, pattern: str, ) -> List[str]: """ Filter tasks by metadata field and pattern, returning task pathspecs that match criteria. 
Parameters ---------- flow_name : str Identifier for the flow run_id : str Identifier for the run step_name : str Name of the step to query tasks from field_name : str Name of metadata field to query pattern : str Pattern to match in metadata field value Returns ------- List[str] List of task pathspecs that match the query criteria """ tasks = cls.get_object("step", "task", {}, None, flow_name, run_id, step_name) if not tasks: return [] regex = re.compile(pattern) matching_task_pathspecs = [] for task in tasks: task_id = task.get("task_id") if not task_id: continue if pattern == ".*": # If the pattern is ".*", we can match all tasks without reading metadata matching_task_pathspecs.append( f"{flow_name}/{run_id}/{step_name}/{task_id}" ) continue metadata = cls.get_object( "task", "metadata", {}, None, flow_name, run_id, step_name, task_id ) if any( meta.get("field_name") == field_name and regex.match(meta.get("value", "")) for meta in metadata ): matching_task_pathspecs.append( f"{flow_name}/{run_id}/{step_name}/{task_id}" ) return matching_task_pathspecs @classmethod def _get_object_internal( cls, obj_type, obj_order, sub_type, sub_order, filters, attempt, *args ): # This is guaranteed by MetaflowProvider.get_object(), sole intended caller if obj_type in ("metadata", "self"): raise MetaflowInternalError(msg="Type %s is not allowed" % obj_type) if obj_type not in ("root", "flow", "run", "step", "task", "artifact"): raise MetaflowInternalError(msg="Unexpected object type %s" % obj_type) if obj_type == "artifact": # Artifacts are actually part of the tasks in the filesystem # E.g. we get here for (obj_type, sub_type) == (artifact, self) obj_type = "task" sub_type = "artifact" sub_order = obj_order obj_order = obj_order - 1 if obj_type != ObjectOrder.order_to_type(obj_order): raise MetaflowInternalError( "Object type order mismatch %s %s" % (obj_type, ObjectOrder.order_to_type(obj_order)) ) if sub_type != ObjectOrder.order_to_type(sub_order): raise MetaflowInternalError( "Sub type order mismatch %s %s" % (sub_type, ObjectOrder.order_to_type(sub_order)) ) RUN_ORDER = ObjectOrder.type_to_order("run") if obj_type not in ("root", "flow", "run", "step", "task"): raise MetaflowInternalError(msg="Unexpected object type %s" % obj_type) # Special handling of self, artifact, and metadata if sub_type == "self": meta_path = cls._get_metadir(*args[:obj_order]) if meta_path is None: return None self_file = os.path.join(meta_path, "_self.json") if os.path.isfile(self_file): obj = MetadataProvider._apply_filter( [cls._read_json_file(self_file)], filters )[0] # For non-descendants of a run, we are done if obj_order <= RUN_ORDER: return obj if obj_type not in ("step", "task"): raise MetaflowInternalError( msg="Unexpected object type %s" % obj_type ) run = cls.get_object( "run", "self", {}, None, *args[:RUN_ORDER] # *[flow_id, run_id] ) if not run: raise MetaflowInternalError( msg="Could not find run %s" % str(args[:RUN_ORDER]) ) obj["tags"] = run.get("tags", []) obj["system_tags"] = run.get("system_tags", []) return obj return None if sub_type == "artifact": if obj_type not in ("root", "flow", "run", "step", "task"): raise MetaflowInternalError(msg="Unexpected object type %s" % obj_type) meta_path = cls._get_metadir(*args[:obj_order]) result = [] if meta_path is None: return result successful_attempt = attempt if successful_attempt is None: attempt_done_files = os.path.join(meta_path, "sysmeta_attempt-done_*") attempts_done = sorted(glob.iglob(attempt_done_files)) if attempts_done: successful_attempt = int( 
cls._read_json_file(attempts_done[-1])["value"] ) if successful_attempt is not None: which_artifact = "*" if len(args) >= sub_order: which_artifact = args[sub_order - 1] artifact_files = os.path.join( meta_path, "%d_artifact_%s.json" % (successful_attempt, which_artifact), ) for obj in glob.iglob(artifact_files): result.append(cls._read_json_file(obj)) # We are getting artifacts. We should overlay with ancestral run's tags run = cls.get_object( "run", "self", {}, None, *args[:RUN_ORDER] # *[flow_id, run_id] ) if not run: raise MetaflowInternalError( msg="Could not find run %s" % str(args[:RUN_ORDER]) ) for obj in result: obj["tags"] = run.get("tags", []) obj["system_tags"] = run.get("system_tags", []) if len(result) == 1: return result[0] return result if sub_type == "metadata": # artifact is not expected because if obj_type=artifact on function entry, we transform to =task if obj_type not in ("root", "flow", "run", "step", "task"): raise MetaflowInternalError(msg="Unexpected object type %s" % obj_type) result = [] meta_path = cls._get_metadir(*args[:obj_order]) if meta_path is None: return result files = os.path.join(meta_path, "sysmeta_*") for obj in glob.iglob(files): result.append(cls._read_json_file(obj)) return result # For the other types, we locate all the objects we need to find and return them if obj_type not in ("root", "flow", "run", "step", "task"): raise MetaflowInternalError(msg="Unexpected object type %s" % obj_type) if sub_type not in ("flow", "run", "step", "task"): raise MetaflowInternalError(msg="unexpected sub type %s" % sub_type) obj_path = cls._make_path(*args[:obj_order], create_on_absent=False) result = [] if obj_path is None: return result skip_dirs = "*/" * (sub_order - obj_order) storage_class = cls._get_storage_class() all_meta = os.path.join(obj_path, skip_dirs, storage_class.METADATA_DIR) SelfInfo = collections.namedtuple("SelfInfo", ["filepath", "run_id"]) self_infos = [] for meta_path in glob.iglob(all_meta): self_file = os.path.join(meta_path, "_self.json") if not os.path.isfile(self_file): continue run_id = None # flow and run do not need info from ancestral run if sub_type in ("step", "task"): run_id = cls._deduce_run_id_from_meta_dir(meta_path, sub_type) # obj_type IS run, or more granular than run, let's do sanity check vs args if obj_order >= RUN_ORDER: if run_id != args[RUN_ORDER - 1]: raise MetaflowInternalError( msg="Unexpected run id %s deduced from meta path" % run_id ) self_infos.append(SelfInfo(filepath=self_file, run_id=run_id)) for self_info in self_infos: obj = cls._read_json_file(self_info.filepath) if self_info.run_id: flow_id_from_args = args[0] run = cls.get_object( "run", "self", {}, None, flow_id_from_args, self_info.run_id, ) if not run: raise MetaflowInternalError( msg="Could not find run %s, %s" % (flow_id_from_args, self_info.run_id) ) obj["tags"] = run.get("tags", []) obj["system_tags"] = run.get("system_tags", []) result.append(obj) return MetadataProvider._apply_filter(result, filters) @classmethod def _deduce_run_id_from_meta_dir(cls, meta_dir_path, sub_type): curr_order = ObjectOrder.type_to_order(sub_type) levels_to_ascend = curr_order - ObjectOrder.type_to_order("run") if levels_to_ascend < 0: return None curr_path = meta_dir_path for _ in range(levels_to_ascend + 1): # +1 to account for ../_meta curr_path, _ = os.path.split(curr_path) _, run_id = os.path.split(curr_path) if not run_id: raise MetaflowInternalError( "Failed to deduce run_id from meta dir %s" % meta_dir_path ) return run_id @classmethod def _makedirs(cls, 
path): # this is for python2 compatibility. # Python3 has os.makedirs(exist_ok=True). try: os.makedirs(path) except OSError as x: if x.errno == 17: # Error raised when directory exists return else: raise @classmethod def _persist_tags_for_run(cls, flow_id, run_id, tags, system_tags): subpath = cls._create_and_get_metadir(flow_name=flow_id, run_id=run_id) selfname = os.path.join(subpath, "_self.json") if not os.path.isfile(selfname): raise MetaflowInternalError( msg="Could not verify Run existence on disk - missing %s" % selfname ) cls._save_meta( subpath, { "_self": MetadataProvider._run_to_json_static( flow_id, run_id=run_id, tags=tags, sys_tags=system_tags ) }, allow_overwrite=True, ) def _ensure_meta( self, obj_type, run_id, step_name, task_id, tags=None, sys_tags=None ): if tags is None: tags = set() if sys_tags is None: sys_tags = set() subpath = self.__class__._create_and_get_metadir( self._flow_name, run_id, step_name, task_id ) selfname = os.path.join(subpath, "_self.json") self.__class__._makedirs(subpath) if os.path.isfile(selfname): # There is a race here, but we are not aiming to make this as solid as # the metadata service. This is used primarily for concurrent resumes, # so it is highly unlikely that this combination (multiple resumes of # the same flow on the same machine) happens. return False # In this case the metadata information does not exist, so we create it self._save_meta( subpath, { "_self": self._object_to_json( obj_type, run_id, step_name, task_id, self.sticky_tags.union(tags), self.sticky_sys_tags.union(sys_tags), ) }, ) return True def _new_run(self, run_id, tags=None, sys_tags=None): self._ensure_meta("flow", None, None, None) return self._ensure_meta("run", run_id, None, None, tags, sys_tags) def _new_task( self, run_id, step_name, task_id, attempt=0, tags=None, sys_tags=None ): self._ensure_meta("step", run_id, step_name, None) to_return = self._ensure_meta( "task", run_id, step_name, task_id, tags, sys_tags ) self._register_system_metadata(run_id, step_name, task_id, attempt) return to_return @classmethod def _make_path( cls, flow_name=None, run_id=None, step_name=None, task_id=None, create_on_absent=True, ): storage_class = cls._get_storage_class() if storage_class.datastore_root is None: def print_clean(line, **kwargs): print(line) storage_class.datastore_root = storage_class.get_datastore_root_from_config( print_clean, create_on_absent=create_on_absent ) if storage_class.datastore_root is None: return None if flow_name is None: return storage_class.datastore_root components = [] if flow_name: components.append(flow_name) if run_id: components.append(run_id) if step_name: components.append(step_name) if task_id: components.append(task_id) return storage_class().full_uri(storage_class.path_join(*components)) @classmethod def _create_and_get_metadir( cls, flow_name=None, run_id=None, step_name=None, task_id=None ): storage_class = cls._get_storage_class() root_path = cls._make_path(flow_name, run_id, step_name, task_id) subpath = os.path.join(root_path, storage_class.METADATA_DIR) cls._makedirs(subpath) return subpath @classmethod def _get_metadir(cls, flow_name=None, run_id=None, step_name=None, task_id=None): storage_class = cls._get_storage_class() root_path = cls._make_path( flow_name, run_id, step_name, task_id, create_on_absent=False ) if root_path is None: return None subpath = os.path.join(root_path, storage_class.METADATA_DIR) if os.path.isdir(subpath): return subpath return None @classmethod def _dump_json_to_file(cls, filepath, data, 
allow_overwrite=False): if os.path.isfile(filepath) and not allow_overwrite: return try: with tempfile.NamedTemporaryFile( mode="w", dir=os.path.dirname(filepath), delete=False ) as f: json.dump(data, f) os.rename(f.name, filepath) finally: # clean up in case anything goes wrong if f and os.path.isfile(f.name): os.remove(f.name) @classmethod def _read_json_file(cls, filepath): with open(filepath, "r") as f: return json.load(f) @classmethod def _save_meta(cls, root_dir, metadict, allow_overwrite=False): for name, datum in metadict.items(): filename = os.path.join(root_dir, "%s.json" % name) cls._dump_json_to_file(filename, datum, allow_overwrite=allow_overwrite)
LocalMetadataProvider
python
django__django
tests/db_functions/json/test_json_object.py
{ "start": 362, "end": 3371 }
class ____(TestCase): @classmethod def setUpTestData(cls): Author.objects.bulk_create( [ Author(name="Ivan Ivanov", alias="iivanov"), Author(name="Bertha Berthy", alias="bberthy"), ] ) def test_empty(self): obj = Author.objects.annotate(json_object=JSONObject()).first() self.assertEqual(obj.json_object, {}) def test_basic(self): obj = Author.objects.annotate(json_object=JSONObject(name="name")).first() self.assertEqual(obj.json_object, {"name": "Ivan Ivanov"}) def test_expressions(self): obj = Author.objects.annotate( json_object=JSONObject( name=Lower("name"), alias="alias", goes_by="goes_by", salary=Value(30000.15), age=F("age") * 2, ) ).first() self.assertEqual( obj.json_object, { "name": "ivan ivanov", "alias": "iivanov", "goes_by": None, "salary": 30000.15, "age": 60, }, ) def test_nested_json_object(self): obj = Author.objects.annotate( json_object=JSONObject( name="name", nested_json_object=JSONObject( alias="alias", age="age", ), ) ).first() self.assertEqual( obj.json_object, { "name": "Ivan Ivanov", "nested_json_object": { "alias": "iivanov", "age": 30, }, }, ) def test_nested_empty_json_object(self): obj = Author.objects.annotate( json_object=JSONObject( name="name", nested_json_object=JSONObject(), ) ).first() self.assertEqual( obj.json_object, { "name": "Ivan Ivanov", "nested_json_object": {}, }, ) def test_textfield(self): Article.objects.create( title="The Title", text="x" * 4000, written=timezone.now(), ) obj = Article.objects.annotate(json_object=JSONObject(text=F("text"))).first() self.assertEqual(obj.json_object, {"text": "x" * 4000}) def test_order_by_key(self): qs = Author.objects.annotate(attrs=JSONObject(alias=F("alias"))).order_by( "attrs__alias" ) self.assertQuerySetEqual(qs, Author.objects.order_by("alias")) def test_order_by_nested_key(self): qs = Author.objects.annotate( attrs=JSONObject(nested=JSONObject(alias=F("alias"))) ).order_by("-attrs__nested__alias") self.assertQuerySetEqual(qs, Author.objects.order_by("-alias")) @skipIfDBFeature("has_json_object_function")
JSONObjectTests
python
pytorch__pytorch
test/inductor/test_perf.py
{ "start": 18252, "end": 20828 }
class ____(TestCase): """ Testing the fusion group creation heuristic (i.e. cases where we can't fuse everything into a single kernel) Disables inductor rematerialization for easier reasoning of tests. """ @classmethod def setUpClass(cls): super().setUpClass() cls._stack = contextlib.ExitStack() cls._stack.enter_context(patch.object(config, "realize_opcount_threshold", 0)) @classmethod def tearDownClass(cls): cls._stack.close() super().tearDownClass() @patch.object(config, "pattern_matcher", False) def test_fusion_choice1(self): # Doesn't matter where we break fusion group here def f(a): c = a.cos() d = torch.mm(c, c) e = c.cos() return d + e inp = (T(10, 10),) self.assertExpectedInline(count_numel(f, *inp), """700""") @patch.object(config, "pattern_matcher", False) def test_fusion_choice2(self): # We should materialize e (it's smaller!) # [c, e]: 210, [f]: 210, [d]: 200 def f(a): c = a.cos() d = torch.mm(c, c) e = c.sum(dim=1) f = d + e return f inp = (T(10, 10),) self.assertExpectedInline(count_numel(f, *inp), """620""") @patch.object(config, "pattern_matcher", False) def test_fusion_choice3(self): # We should materialize e. # [c, e]: 300, [f]: 300, [d]: 200 def f(a): c = a.cos() d = torch.mm(c, c) e = c + a f = d + e return f, e inp = (T(10, 10),) self.assertExpectedInline(count_numel(f, *inp), """800""") @patch.object(config, "pattern_matcher", False) def test_fusion_choice4_cpu(self): # Fuse nodes with same number of elements and compatible original var ranges # [buf0: {d0: 60, d1: 11}, buf1: {d0: 660}] -> buf0_buf1 def f(x, w): o1 = x * w output = o1 + 1.0 return output inp = (T(2, 3, 10, 11, device="cpu"), T(11, device="cpu")) self.assertExpectedInline(count_numel(f, *inp), """1331""") # [buf0_buf1: {d0: 60, d1: 11}, buf2: {d0: 660}] -> buf0_buf1_buf2 def f(x, w1, w2): o1 = x * w1 o2 = x * w2 output = o1 + o2 return output inp = (T(2, 3, 10, 11, device="cpu"), T(11, device="cpu"), T(11, device="cpu")) self.assertExpectedInline(count_numel(f, *inp), """1342""")
SchedulerFusionTests
python
great-expectations__great_expectations
tests/integration/test_utils/data_source_config/postgres.py
{ "start": 1367, "end": 2030 }
class ____(SQLBatchTestSetup[PostgreSQLDatasourceTestConfig]): @property @override def connection_string(self) -> str: return "postgresql+psycopg2://postgres@localhost:5432/test_ci" @property @override def use_schema(self) -> bool: return False @override def make_asset(self) -> TableAsset: return self.context.data_sources.add_postgres( name=self._random_resource_name(), connection_string=self.connection_string ).add_table_asset( name=self._random_resource_name(), table_name=self.table_name, schema_name=self.schema, )
PostgresBatchTestSetup
python
PrefectHQ__prefect
tests/test_logging.py
{ "start": 55992, "end": 69420 }
class ____: def test_filters_current_api_key(self): test_api_key = "hi-hello-im-an-api-key" with temporary_settings({PREFECT_API_KEY: test_api_key}): filter = ObfuscateApiKeyFilter() record = logging.LogRecord( name="Test Log", level=1, pathname="/path/file.py", lineno=1, msg=test_api_key, args=None, exc_info=None, ) filter.filter(record) assert test_api_key not in record.getMessage() assert obfuscate(test_api_key) in record.getMessage() def test_current_api_key_is_not_logged(self, caplog): test_api_key = "hot-dog-theres-a-logger-this-is-my-big-chance-for-stardom" with temporary_settings({PREFECT_API_KEY: test_api_key}): logger = get_logger("test") logger.info(test_api_key) assert test_api_key not in caplog.text assert obfuscate(test_api_key) in caplog.text def test_current_api_key_is_not_logged_from_flow( self, caplog: pytest.LogCaptureFixture ): test_api_key = "i-am-a-plaintext-api-key-and-i-dream-of-being-logged-one-day" with temporary_settings({PREFECT_API_KEY: test_api_key}): @flow def test_flow(): logger = get_run_logger() logger.info(test_api_key) test_flow() assert test_api_key not in caplog.text assert obfuscate(test_api_key) in caplog.text def test_current_api_key_is_not_logged_from_flow_log_prints( self, caplog: pytest.LogCaptureFixture ): test_api_key = "i-am-a-sneaky-little-api-key" with temporary_settings({PREFECT_API_KEY: test_api_key}): @flow(log_prints=True) def test_flow(): print(test_api_key) test_flow() assert test_api_key not in caplog.text assert obfuscate(test_api_key) in caplog.text def test_current_api_key_is_not_logged_from_task( self, caplog: pytest.LogCaptureFixture ): test_api_key = "i-am-jacks-security-risk" with temporary_settings({PREFECT_API_KEY: test_api_key}): @task def test_task(): logger = get_run_logger() logger.info(test_api_key) @flow def test_flow(): test_task() test_flow() assert test_api_key not in caplog.text assert obfuscate(test_api_key) in caplog.text @pytest.mark.parametrize( "raw_log_record,expected_log_record", [ ( ["super-mega-admin-key", "in", "a", "list"], ["********", "in", "a", "list"], ), ( {"super-mega-admin-key": "in", "a": "dict"}, {"********": "in", "a": "dict"}, ), ( { "key1": "some_value", "key2": [ {"nested_key": "api_key: super-mega-admin-key"}, "another_value", ], }, { "key1": "some_value", "key2": [ {"nested_key": "api_key: ********"}, "another_value", ], }, ), ], ) def test_redact_substr_from_collections( self, caplog: pytest.LogCaptureFixture, raw_log_record: Any, expected_log_record: Any, ): """ This is a regression test for https://github.com/PrefectHQ/prefect/issues/12139 """ @flow() def test_log_list(): logger = get_run_logger() logger.info(raw_log_record) with temporary_settings({PREFECT_API_KEY: "super-mega-admin-key"}): test_log_list() assert str(expected_log_record) in caplog.text def test_log_in_flow(caplog: pytest.LogCaptureFixture): msg = "Hello world!" @flow def test_flow(): logger = get_run_logger() logger.warning(msg) test_flow() for record in caplog.records: if record.msg == msg: assert record.levelno == logging.WARNING break else: raise AssertionError(f"{msg} was not found in records: {caplog.records}") def test_log_in_task(caplog: pytest.LogCaptureFixture): msg = "Hello world!" 
@task def test_task(): logger = get_run_logger() logger.warning(msg) @flow def test_flow(): test_task() test_flow() for record in caplog.records: if record.msg == msg: assert record.levelno == logging.WARNING break else: raise AssertionError(f"{msg} was not found in records") def test_without_disable_logger(caplog: pytest.LogCaptureFixture): """ Sanity test to double check whether caplog actually works so can be more confident in the asserts in test_disable_logger. """ logger = logging.getLogger("griffe.agents.nodes") def function_with_logging(logger: logging.Logger): assert not logger.disabled logger.critical("it's enabled!") return 42 function_with_logging(logger) assert not logger.disabled assert ("griffe.agents.nodes", 50, "it's enabled!") in caplog.record_tuples def test_disable_logger(caplog: pytest.LogCaptureFixture): logger = logging.getLogger("griffe.agents.nodes") def function_with_logging(logger): logger.critical("I know this is critical, but it's disabled!") return 42 with disable_logger(logger.name): assert logger.disabled function_with_logging(logger) assert not logger.disabled assert caplog.record_tuples == [] def test_disable_run_logger_with_task(caplog: pytest.LogCaptureFixture): @task def task_with_run_logger(): logger = get_run_logger() logger.critical("won't show") flow_run_logger = get_logger("prefect.flow_runs") task_run_logger = get_logger("prefect.task_runs") # Can call the task as normal and the underlying function without issue inside the context manager with disable_run_logger(): task_with_run_logger() task_with_run_logger.fn() assert flow_run_logger.disabled assert task_run_logger.disabled # Loggers should return to normal state and the disabled logs should not be in the caplog assert not flow_run_logger.disabled assert not task_run_logger.disabled assert "won't show" not in caplog.text caplog.clear() # Should operate normally outside of the context manager task_with_run_logger() assert "won't show" in caplog.text with pytest.raises(MissingContextError): task_with_run_logger.fn() def test_disable_run_logger_with_flow(caplog: pytest.LogCaptureFixture): @flow def test_flow(): logger = get_run_logger() logger.critical("won't show") flow_run_logger = get_logger("prefect.flow_runs") task_run_logger = get_logger("prefect.task_runs") # Can call the flow as normal and the underlying function without issue inside the context manager with disable_run_logger(): test_flow() test_flow.fn() assert flow_run_logger.disabled assert task_run_logger.disabled # Loggers should return to normal state and the disabled logs should not be in the caplog assert not flow_run_logger.disabled assert not task_run_logger.disabled assert "won't show" not in caplog.text caplog.clear() # Should operate normally outside of the context manager test_flow() assert "won't show" in caplog.text with pytest.raises(MissingContextError): test_flow.fn() def test_patch_print_writes_to_stdout_without_run_context( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str] ): with patch_print(): print("foo") assert "foo" in capsys.readouterr().out assert "foo" not in caplog.text @pytest.mark.parametrize("run_context_cls", [TaskRunContext, FlowRunContext]) def test_patch_print_writes_to_stdout_with_run_context_and_no_log_prints( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str], run_context_cls: type, ): with patch_print(): with run_context_cls.model_construct(log_prints=False): print("foo") assert "foo" in capsys.readouterr().out assert "foo" not in caplog.text def 
test_patch_print_does_not_write_to_logger_with_custom_file( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str], task_run: "TaskRun", ): string_io = StringIO() @task def my_task(): pass with patch_print(): with TaskRunContext.model_construct( log_prints=True, task_run=task_run, task=my_task ): print("foo", file=string_io) assert "foo" not in caplog.text assert "foo" not in capsys.readouterr().out assert string_io.getvalue().rstrip() == "foo" def test_patch_print_writes_to_logger_with_task_run_context( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str], task_run: "TaskRun", ): @task def my_task(): pass with patch_print(): with TaskRunContext.model_construct( log_prints=True, task_run=task_run, task=my_task ): print("foo") assert "foo" not in capsys.readouterr().out assert "foo" in caplog.text for record in caplog.records: if record.message == "foo": break assert record.levelname == "INFO" assert record.name == "prefect.task_runs" assert record.task_run_id == str(task_run.id) assert record.task_name == my_task.name @pytest.mark.parametrize("file", ["stdout", "stderr"]) def test_patch_print_writes_to_logger_with_explicit_file( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str], task_run: "TaskRun", file: str, ): @task def my_task(): pass with patch_print(): with TaskRunContext.model_construct( log_prints=True, task_run=task_run, task=my_task ): # We must defer retrieval of sys.<file> because pytest overrides sys! print("foo", file=getattr(sys, file)) out, err = capsys.readouterr() assert "foo" not in out assert "foo" not in err assert "foo" in caplog.text for record in caplog.records: if record.message == "foo": break assert record.levelname == "INFO" assert record.name == "prefect.task_runs" assert record.task_run_id == str(task_run.id) assert record.task_name == my_task.name def test_patch_print_writes_to_logger_with_flow_run_context( caplog: pytest.LogCaptureFixture, capsys: pytest.CaptureFixture[str], flow_run: "FlowRun", ): @flow def my_flow(): pass with patch_print(): with FlowRunContext.model_construct( log_prints=True, flow_run=flow_run, flow=my_flow ): print("foo") assert "foo" not in capsys.readouterr().out assert "foo" in caplog.text for record in caplog.records: if record.message == "foo": break assert record.levelname == "INFO" assert record.name == "prefect.flow_runs" assert record.flow_run_id == str(flow_run.id) assert record.flow_name == my_flow.name def test_log_adapter_get_child(): logger = PrefectLogAdapter(get_logger("prefect.parent"), {"hello": "world"}) assert logger.extra == {"hello": "world"} child_logger = logger.getChild("child", {"goodnight": "moon"}) assert child_logger.logger.name == "prefect.parent.child" assert child_logger.extra == {"hello": "world", "goodnight": "moon"} def test_eavesdropping(): logging.getLogger("my_logger").debug("This is before the context") with LogEavesdropper("my_logger", level=logging.INFO) as eavesdropper: logging.getLogger("my_logger").info("Hello, world!") logging.getLogger("my_logger.child_module").warning("Another one!") logging.getLogger("my_logger").debug("Not this one!") logging.getLogger("my_logger").debug("This is after the context") assert eavesdropper.text() == "[INFO]: Hello, world!\n[WARNING]: Another one!" 
def test_prepare_truncates_oversized_log(): max_log_size = 500 handler = APILogHandler() very_long_msg = "X" * (max_log_size * 2) record = logging.LogRecord( name="test.logger.flow", level=logging.INFO, pathname=__file__, lineno=10, msg=very_long_msg, args=(), exc_info=None, ) record.flow_run_id = str(uuid.uuid4()) with patch( "prefect.settings.PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value", return_value=max_log_size, ): log = handler.prepare(record) # Check truncation suffix is present assert "... [truncated]" in log["message"] # Check size does not exceed max_log_size assert log["__payload_size__"] <= max_log_size # flow_run_id should match assert log["flow_run_id"] == record.flow_run_id # Message should not be empty (except the truncation text) assert log["message"].strip() != ""
TestObfuscateApiKeyFilter
python
python__mypy
mypyc/test/test_lowering.py
{ "start": 888, "end": 2433 }
class ____(MypycDataSuite): files = ["lowering-int.test", "lowering-list.test"] base_path = test_temp_dir def run_case(self, testcase: DataDrivenTestCase) -> None: options = infer_ir_build_options_from_test_name(testcase.name) if options is None: # Skipped test case return with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase): expected_output = remove_comment_lines(testcase.output) expected_output = replace_word_size(expected_output) try: ir = build_ir_for_single_file(testcase.input, options) except CompileError as e: actual = e.messages else: actual = [] for fn in ir: if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"): continue options = CompilerOptions() # Lowering happens after exception handling and ref count opcodes have # been added. Any changes must maintain reference counting semantics. insert_uninit_checks(fn) insert_exception_handling(fn) insert_ref_count_opcodes(fn) lower_ir(fn, options) do_flag_elimination(fn, options) actual.extend(format_func(fn)) assert_test_output(testcase, actual, "Invalid source code output", expected_output)
TestLowering
python
huggingface__transformers
src/transformers/models/electra/modeling_electra.py
{ "start": 44573, "end": 46824 }
class ____(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.electra = ElectraModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ discriminator_hidden_states = self.electra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, return_dict=True, **kwargs, ) discriminator_sequence_output = discriminator_hidden_states[0] discriminator_sequence_output = self.dropout(discriminator_sequence_output) logits = self.classifier(discriminator_sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @auto_docstring
ElectraForTokenClassification
python
allegroai__clearml
clearml/backend_api/services/v2_13/events.py
{ "start": 66606, "end": 67533 }
class ____(Request): """ get task scalar metrics and variants :param task: task ID :type task: str """ _service = "events" _action = "get_scalar_metrics_and_variants" _version = "2.13" _schema = { "definitions": {}, "properties": {"task": {"description": "task ID", "type": "string"}}, "required": ["task"], "type": "object", } def __init__(self, task: str, **kwargs: Any) -> None: super(GetScalarMetricsAndVariantsRequest, self).__init__(**kwargs) self.task = task @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value
GetScalarMetricsAndVariantsRequest
python
pyparsing__pyparsing
pyparsing/core.py
{ "start": 237437, "end": 241021 }
class ____(TokenConverter): """Converter to return a repetitive expression as a list, but also as a dictionary. Each element can also be referenced using the first token in the expression as its key. Useful for tabular report scraping when the first column can be used as a item key. The optional ``asdict`` argument when set to True will return the parsed tokens as a Python dict instead of a pyparsing ParseResults. Example: .. doctest:: >>> data_word = Word(alphas) >>> label = data_word + FollowedBy(':') >>> attr_expr = ( ... label + Suppress(':') ... + OneOrMore(data_word, stop_on=label) ... .set_parse_action(' '.join) ... ) >>> text = "shape: SQUARE posn: upper left color: light blue texture: burlap" >>> # print attributes as plain groups >>> print(attr_expr[1, ...].parse_string(text).dump()) ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) # Dict will auto-assign names. >>> result = Dict(Group(attr_expr)[1, ...]).parse_string(text) >>> print(result.dump()) [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - color: 'light blue' - posn: 'upper left' - shape: 'SQUARE' - texture: 'burlap' [0]: ['shape', 'SQUARE'] [1]: ['posn', 'upper left'] [2]: ['color', 'light blue'] [3]: ['texture', 'burlap'] # access named fields as dict entries, or output as dict >>> print(result['shape']) SQUARE >>> print(result.as_dict()) {'shape': 'SQUARE', 'posn': 'upper left', 'color': 'light blue', 'texture': 'burlap'} See more examples at :class:`ParseResults` of accessing fields by results name. """ def __init__(self, expr: ParserElement, asdict: bool = False) -> None: super().__init__(expr) self.saveAsList = True self._asPythonDict = asdict def postParse(self, instring, loc, tokenlist): for i, tok in enumerate(tokenlist): if len(tok) == 0: continue ikey = tok[0] if isinstance(ikey, int): ikey = str(ikey).strip() if len(tok) == 1: tokenlist[ikey] = _ParseResultsWithOffset("", i) elif len(tok) == 2 and not isinstance(tok[1], ParseResults): tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) else: try: dictvalue = tok.copy() # ParseResults(i) except Exception: exc = TypeError( "could not extract dict values from parsed results" " - Dict expression must contain Grouped expressions" ) raise exc from None del dictvalue[0] if len(dictvalue) != 1 or ( isinstance(dictvalue, ParseResults) and dictvalue.haskeys() ): tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) else: tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) if self._asPythonDict: return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() return [tokenlist] if self.resultsName else tokenlist
Dict
python
kamyu104__LeetCode-Solutions
Python/maximum-binary-tree-ii.py
{ "start": 191, "end": 736 }
class ____(object): def insertIntoMaxTree(self, root, val): """ :type root: TreeNode :type val: int :rtype: TreeNode """ if not root: return TreeNode(val) if val > root.val: node = TreeNode(val) node.left = root return node curr = root while curr.right and curr.right.val > val: curr = curr.right node = TreeNode(val) curr.right, node.left = node, curr.right return root
Solution
python
huggingface__transformers
tests/models/aya_vision/test_modeling_aya_vision.py
{ "start": 5197, "end": 7104 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( AyaVisionModel, AyaVisionForConditionalGeneration, ) if is_torch_available() else () ) all_generative_model_classes = (AyaVisionForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "image-text-to-text": AyaVisionForConditionalGeneration, "any-to-any": AyaVisionForConditionalGeneration, } if is_torch_available() else {} ) _is_composite = True def setUp(self): self.model_tester = AyaVisionVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=AyaVisionConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Compile not yet supported because in LLava models") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass # todo: yoni - fix or improve the test @unittest.skip("Difference is slightly higher than the threshold") def test_batching_equivalence(self): pass @require_read_token @require_torch
AyaVisionModelTest
python
django__django
django/utils/functional.py
{ "start": 1799, "end": 7671 }
class ____: """ Base class for the proxy class created in the closure of the lazy function. It's used to recognize promises in code. """ pass def lazy(func, *resultclasses): """ Turn any callable into a lazy evaluated callable. result classes or types is required -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access. """ class __proxy__(Promise): """ Encapsulate a function call and act as a proxy for methods that are called on the result of that function. The function is not evaluated until one of the methods on the result is called. """ def __init__(self, args, kw): self._args = args self._kw = kw def __reduce__(self): return ( _lazy_proxy_unpickle, (func, self._args, self._kw, *resultclasses), ) def __deepcopy__(self, memo): # Instances of this class are effectively immutable. It's just a # collection of functions. So we don't need to do anything # complicated for copying. memo[id(self)] = self return self def __cast(self): return func(*self._args, **self._kw) # Explicitly wrap methods which are defined on object and hence would # not have been overloaded by the loop over resultclasses below. def __repr__(self): return repr(self.__cast()) def __str__(self): return str(self.__cast()) def __eq__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() == other def __ne__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() != other def __lt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() < other def __le__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() <= other def __gt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() > other def __ge__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() >= other def __hash__(self): return hash(self.__cast()) def __format__(self, format_spec): return format(self.__cast(), format_spec) # Explicitly wrap methods which are required for certain operations on # int/str objects to function correctly. def __add__(self, other): return self.__cast() + other def __radd__(self, other): return other + self.__cast() def __mod__(self, other): return self.__cast() % other def __mul__(self, other): return self.__cast() * other # Add wrappers for all methods from resultclasses which haven't been # wrapped explicitly above. for resultclass in resultclasses: for type_ in resultclass.mro(): for method_name in type_.__dict__: # All __promise__ return the same wrapper method, they look up # the correct implementation when called. if hasattr(__proxy__, method_name): continue # Builds a wrapper around some method. Pass method_name to # avoid issues due to late binding. def __wrapper__(self, *args, __method_name=method_name, **kw): # Automatically triggers the evaluation of a lazy value and # applies the given method of the result type. result = func(*self._args, **self._kw) return getattr(result, __method_name)(*args, **kw) setattr(__proxy__, method_name, __wrapper__) @wraps(func) def __wrapper__(*args, **kw): # Creates the proxy object, instead of the actual value. return __proxy__(args, kw) return __wrapper__ def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses): return lazy(func, *resultclasses)(*args, **kwargs) def lazystr(text): """ Shortcut for the common case of a lazy callable that returns str. 
""" return lazy(str, str)(text) def keep_lazy(*resultclasses): """ A decorator that allows a function to be called with one or more lazy arguments. If none of the args are lazy, the function is evaluated immediately, otherwise a __proxy__ is returned that will evaluate the function when needed. """ if not resultclasses: raise TypeError("You must pass at least one argument to keep_lazy().") def decorator(func): lazy_func = lazy(func, *resultclasses) @wraps(func) def wrapper(*args, **kwargs): if any( isinstance(arg, Promise) for arg in itertools.chain(args, kwargs.values()) ): return lazy_func(*args, **kwargs) return func(*args, **kwargs) return wrapper return decorator def keep_lazy_text(func): """ A decorator for functions that accept lazy arguments and return text. """ return keep_lazy(str)(func) empty = object() def new_method_proxy(func): def inner(self, *args): if (_wrapped := self._wrapped) is empty: self._setup() _wrapped = self._wrapped return func(_wrapped, *args) inner._mask_wrapped = False return inner
Promise
python
openai__openai-python
src/openai/resources/completions.py
{ "start": 58940, "end": 59189 }
class ____: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = async_to_streamed_response_wrapper( completions.create, )
AsyncCompletionsWithStreamingResponse
python
numba__numba
numba/tests/test_array_methods.py
{ "start": 7024, "end": 68040 }
class ____(MemoryLeakMixin, TestCase): """ Test various array methods and array-related functions. """ def setUp(self): super(TestArrayMethods, self).setUp() def check_round_scalar(self, unary_pyfunc, binary_pyfunc): base_values = [-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75] complex_values = [x * (1 - 1j) for x in base_values] int_values = [int(x) for x in base_values] argtypes = (types.float64, types.float32, types.int32, types.complex64, types.complex128) argvalues = [base_values, base_values, int_values, complex_values, complex_values] pyfunc = binary_pyfunc for ty, values in zip(argtypes, argvalues): cfunc = njit((ty, types.int32))(pyfunc) for decimals in (1, 0, -1): for v in values: if decimals > 0: v *= 10 expected = _fixed_np_round(v, decimals) got = cfunc(v, decimals) self.assertPreciseEqual(got, expected) pyfunc = unary_pyfunc for ty, values in zip(argtypes, argvalues): cfunc = njit((ty,))(pyfunc) for v in values: expected = _fixed_np_round(v) got = cfunc(v) self.assertPreciseEqual(got, expected) def test_round_scalar(self): self.check_round_scalar(np_round_unary, np_round_binary) def test_around_scalar(self): self.check_round_scalar(np_around_unary, np_around_binary) def check_round_array(self, pyfunc): def check_round(cfunc, values, inty, outty, decimals): # Create input and output arrays of the right type arr = values.astype(as_dtype(inty)) out = np.zeros_like(arr).astype(as_dtype(outty)) pyout = out.copy() _fixed_np_round(arr, decimals, pyout) self.memory_leak_setup() cfunc(arr, decimals, out) self.memory_leak_teardown() np.testing.assert_allclose(out, pyout) # Output shape mismatch with self.assertRaises(ValueError) as raises: cfunc(arr, decimals, out[1:]) self.assertEqual(str(raises.exception), "invalid output shape") def check_types(argtypes, outtypes, values): for inty, outty in product(argtypes, outtypes): argtys = (types.Array(inty, 1, 'A'), types.int32, types.Array(outty, 1, 'A')) cfunc = njit(argtys)(pyfunc) check_round(cfunc, values, inty, outty, 0) check_round(cfunc, values, inty, outty, 1) if not isinstance(outty, types.Integer): check_round(cfunc, values * 10, inty, outty, -1) else: # Avoid Numpy bug when output is an int: # https://github.com/numpy/numpy/issues/5777 pass values = np.array([-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75]) argtypes = (types.float64, types.float32) check_types(argtypes, argtypes, values) argtypes = (types.complex64, types.complex128) check_types(argtypes, argtypes, values * (1 - 1j)) # Exceptions leak references self.disable_leak_check() def test_round_array(self): self.check_round_array(np_round_array) def test_around_array(self): self.check_round_array(np_around_array) @skip_if_numpy_2 def test_round__array(self): self.check_round_array(np_round__array) def test_around_bad_array(self): for pyfunc in (np_round_unary, np_around_unary): cfunc = jit(nopython=True)(pyfunc) msg = '.*The argument "a" must be array-like.*' with self.assertRaisesRegex(TypingError, msg): cfunc(None) def test_around_bad_out(self): funcs = [np_round_array, np_around_array] if numpy_version < (2, 0): funcs.append(np_round__array) for py_func in funcs: cfunc = jit(nopython=True)(py_func) msg = '.*The argument "out" must be an array if it is provided.*' with self.assertRaisesRegex(TypingError, msg): cfunc(5, 0, out=6) def test_array_view(self): def run(arr, dtype): pyfunc = make_array_view(dtype) return njit(pyfunc)(arr) def check(arr, dtype): expected = arr.view(dtype) self.memory_leak_setup() got = run(arr, dtype) self.assertPreciseEqual(got, expected) del 
got self.memory_leak_teardown() def check_err(arr, dtype): with self.assertRaises(ValueError) as raises: run(arr, dtype) self.assertEqual(str(raises.exception), "new type not compatible with array") def check_err_noncontig_last_axis(arr, dtype): # check NumPy interpreted version raises msg = ("To change to a dtype of a different size, the last axis " "must be contiguous") with self.assertRaises(ValueError) as raises: make_array_view(dtype)(arr) self.assertEqual(str(raises.exception), msg) # check Numba version raises with self.assertRaises(ValueError) as raises: run(arr, dtype) self.assertEqual(str(raises.exception), msg) def check_err_0d(arr, dtype): # check NumPy interpreted version raises msg = ("Changing the dtype of a 0d array is only supported " "if the itemsize is unchanged") with self.assertRaises(ValueError) as raises: make_array_view(dtype)(arr) self.assertEqual(str(raises.exception), msg) # check Numba version raises with self.assertRaises(ValueError) as raises: run(arr, dtype) self.assertEqual(str(raises.exception), msg) def check_err_smaller_dtype(arr, dtype): # check NumPy interpreted version raises msg = ("When changing to a smaller dtype, its size must be a " "divisor of the size of original dtype") with self.assertRaises(ValueError) as raises: make_array_view(dtype)(arr) self.assertEqual(str(raises.exception), msg) # check Numba version raises with self.assertRaises(ValueError) as raises: run(arr, dtype) self.assertEqual(str(raises.exception), msg) def check_err_larger_dtype(arr, dtype): # check NumPy interpreted version raises msg = ("When changing to a larger dtype, its size must be a " "divisor of the total size in bytes of the last axis " "of the array.") with self.assertRaises(ValueError) as raises: make_array_view(dtype)(arr) self.assertEqual(str(raises.exception), msg) # check Numba version raises with self.assertRaises(ValueError) as raises: run(arr, dtype) self.assertEqual(str(raises.exception), msg) dt1 = np.dtype([('a', np.int8), ('b', np.int8)]) dt2 = np.dtype([('u', np.int16), ('v', np.int8)]) dt3 = np.dtype([('x', np.int16), ('y', np.int16)]) # The checking routines are much more specific from NumPy 1.23 onwards # as the granularity of error reporing is improved in Numba to match # that of NumPy. if numpy_version >= (1, 23): check_error_larger_dt = check_err_larger_dtype check_error_smaller_dt = check_err_smaller_dtype check_error_noncontig = check_err_noncontig_last_axis check_error_0d = check_err_0d else: check_error_larger_dt = check_err check_error_smaller_dt = check_err check_error_noncontig = check_err check_error_0d = check_err # C-contiguous arr = np.arange(24, dtype=np.int8) check(arr, np.dtype('int16')) check(arr, np.int16) check(arr, np.int8) check(arr, np.float32) check(arr, np.complex64) check(arr, dt1) check(arr, dt2) check_error_larger_dt(arr, np.complex128) # Last dimension must have a compatible size arr = arr.reshape((3, 8)) check(arr, np.int8) check(arr, np.float32) check(arr, np.complex64) check(arr, dt1) check_error_larger_dt(arr, dt2) check_error_larger_dt(arr, np.complex128) # F-contiguous f_arr = np.arange(24, dtype=np.int8).reshape((3, 8)).T # neither F or C contiguous not_f_or_c_arr = np.zeros((4, 4)).T[::2, ::2] # NumPy 1.23 does not allow views with different size dtype for # non-contiguous last axis. 
if numpy_version >= (1, 23): check_maybe_error = check_err_noncontig_last_axis else: check_maybe_error = check check(f_arr, np.int8) check(not_f_or_c_arr, np.uint64) check_maybe_error(f_arr, np.float32) check_maybe_error(f_arr, np.complex64) check_maybe_error(f_arr, dt1) check_error_noncontig(f_arr, dt2) check_error_noncontig(f_arr, np.complex128) check_error_noncontig(not_f_or_c_arr, np.int8) # Non-contiguous: only a type with the same itemsize can be used arr = np.arange(16, dtype=np.int32)[::2] check(arr, np.uint32) check(arr, np.float32) check(arr, dt3) check_error_noncontig(arr, np.int8) check_error_noncontig(arr, np.int16) check_error_noncontig(arr, np.int64) check_error_noncontig(arr, dt1) check_error_noncontig(arr, dt2) ## Zero-dim array: only a type with the same itemsize can be used arr = np.array([42], dtype=np.int32).reshape(()) check(arr, np.uint32) check(arr, np.float32) check(arr, dt3) check_error_0d(arr, np.int8) check_error_0d(arr, np.int16) check_error_0d(arr, np.int64) check_error_0d(arr, dt1) check_error_0d(arr, dt2) # Changing to smaller dtype arr = np.array(['abcdef']) check_error_smaller_dt(arr, np.complex128) # Exceptions leak references self.disable_leak_check() def test_array_sliced_view(self): """ Test .view() on A layout array but has contiguous innermost dimension. """ pyfunc = array_sliced_view cfunc = njit((types.uint8[:],))(pyfunc) orig = np.array([1.5, 2], dtype=np.float32) byteary = orig.view(np.uint8) expect = pyfunc(byteary) got = cfunc(byteary) self.assertEqual(expect, got) def test_array_astype(self): def run(arr, dtype): pyfunc = make_array_astype(dtype) return njit(pyfunc)(arr) def check(arr, dtype): expected = arr.astype(dtype).copy(order='A') got = run(arr, dtype) self.assertPreciseEqual(got, expected) # C-contiguous arr = np.arange(24, dtype=np.int8) check(arr, np.dtype('int16')) check(arr, np.int32) check(arr, np.float32) check(arr, np.complex128) check(arr, "float32") # F-contiguous arr = np.arange(24, dtype=np.int8).reshape((3, 8)).T check(arr, np.float32) # Non-contiguous arr = np.arange(16, dtype=np.int32)[::2] check(arr, np.uint64) # check read only attr does not get copied arr = np.arange(16, dtype=np.int32) arr.flags.writeable = False check(arr, np.int32) # Invalid conversion dt = np.dtype([('x', np.int8)]) with self.assertTypingError() as raises: check(arr, dt) self.assertIn('cannot convert from int32 to Record', str(raises.exception)) # Check non-Literal string raises unicode_val = "float32" with self.assertTypingError() as raises: @jit(nopython=True) def foo(dtype): np.array([1]).astype(dtype) foo(unicode_val) self.assertIn('array.astype if dtype is a string it must be constant', str(raises.exception)) def test_array_tobytes(self): self.check_layout_dependent_func( array_tobytes, memoryaddr=lambda x: np.frombuffer(x, dtype=np.uint8).ctypes.data, ) def check_np_frombuffer(self, pyfunc): cfunc = njit(pyfunc) def check(buf): old_refcnt = sys.getrefcount(buf) expected = pyfunc(buf) self.memory_leak_setup() got = cfunc(buf) self.assertPreciseEqual(got, expected) del expected # Note gc.collect is due to references in `except ... 
as e` that # aren't immediately cleared gc.collect() self.assertEqual(sys.getrefcount(buf), old_refcnt + 1) del got gc.collect() self.assertEqual(sys.getrefcount(buf), old_refcnt) self.memory_leak_teardown() b = bytearray(range(16)) check(b) check(bytes(b)) check(memoryview(b)) check(np.arange(12)) b = np.arange(12).reshape((3, 4)) check(b) # Exceptions leak references self.disable_leak_check() with self.assertRaises(ValueError) as raises: cfunc(bytearray(b"xxx")) self.assertEqual("buffer size must be a multiple of element size", str(raises.exception)) def test_np_frombuffer(self): self.check_np_frombuffer(np_frombuffer) def test_np_frombuffer_dtype(self): self.check_np_frombuffer(np_frombuffer_dtype) def test_np_frombuffer_dtype_str(self): self.check_np_frombuffer(np_frombuffer_dtype_str) def test_np_frombuffer_dtype_non_const_str(self): @jit(nopython=True) def func(buf, dt): np.frombuffer(buf, dtype=dt) with self.assertRaises(TypingError) as raises: func(bytearray(range(16)), 'int32') excstr = str(raises.exception) msg = ("If np.frombuffer dtype is a string it must be a " "string constant.") self.assertIn(msg, excstr) def test_np_frombuffer_bad_buffer(self): @jit(nopython=True) def func(buf): return np.frombuffer(buf) msg = '.*Argument "buffer" must be buffer-like.*' with self.assertRaisesRegex(TypingError, msg) as raises: func(None) def check_layout_dependent_func( self, pyfunc, fac=np.arange, memoryaddr=lambda x: x.ctypes.data ): def check_arr(arr): cfunc = njit((typeof(arr),))(pyfunc) expected = pyfunc(arr) got = cfunc(arr) self.assertPreciseEqual(expected, got) self.assertEqual( arr.ctypes.data == memoryaddr(expected), arr.ctypes.data == memoryaddr(got), ) arr = fac(24) check_arr(arr) check_arr(arr.reshape((3, 8))) check_arr(arr.reshape((3, 8)).T) check_arr(arr.reshape((3, 8))[::2]) check_arr(arr.reshape((2, 3, 4))) check_arr(arr.reshape((2, 3, 4)).T) check_arr(arr.reshape((2, 3, 4))[::2]) arr = np.array([0]).reshape(()) check_arr(arr) def test_array_transpose(self): self.check_layout_dependent_func(array_transpose) def test_array_T(self): self.check_layout_dependent_func(array_T) def test_array_copy(self): self.check_layout_dependent_func(array_copy) def check_object_copy(self, pyfunc): def check_obj(obj): cfunc = njit((typeof(obj),))(pyfunc) expected = pyfunc(obj) got = cfunc(obj) self.assertPreciseEqual(expected, got) check_obj((1, 2, 3)) check_obj([1.0, 2.0, 3.0]) check_obj(6) msg = '.*The argument "a" must be array-like.*' with self.assertRaisesRegex(TypingError, msg) as raises: njit((typeof('hello'), ))(pyfunc) def test_np_copy(self): self.check_layout_dependent_func(np_copy) self.check_object_copy(np_copy) def check_ascontiguousarray_scalar(self, pyfunc): def check_scalar(x): cfunc = njit((typeof(x),))(pyfunc) expected = pyfunc(x) got = cfunc(x) self.assertPreciseEqual(expected, got) for x in [42, 42.0, 42j, np.float32(42), np.float64(42), True]: check_scalar(x) def check_bad_array(self, pyfunc): msg = '.*The argument "a" must be array-like.*' with self.assertRaisesRegex(TypingError, msg) as raises: njit((typeof('hello'), ))(pyfunc) def test_np_asfortranarray(self): self.check_layout_dependent_func(np_asfortranarray) self.check_bad_array(np_asfortranarray) self.check_ascontiguousarray_scalar(np_asfortranarray) def test_np_ascontiguousarray(self): self.check_layout_dependent_func(np_ascontiguousarray) self.check_bad_array(np_asfortranarray) self.check_ascontiguousarray_scalar(np_ascontiguousarray) def check_np_frombuffer_allocated(self, pyfunc): cfunc = njit(pyfunc) def 
check(shape): expected = pyfunc(shape) got = cfunc(shape) self.assertPreciseEqual(got, expected) check((16,)) check((4, 4)) check((1, 0, 1)) def test_np_frombuffer_allocated(self): self.check_np_frombuffer_allocated(np_frombuffer_allocated) def test_np_frombuffer_allocated2(self): self.check_np_frombuffer_allocated(np_frombuffer_allocated_dtype) def check_nonzero(self, pyfunc): def fac(N): np.random.seed(42) arr = np.random.random(N) arr[arr < 0.3] = 0.0 arr[arr > 0.7] = float('nan') return arr def check_arr(arr): cfunc = njit((typeof(arr),))(pyfunc) expected = pyfunc(arr) expected = [a.copy() for a in expected] self.assertPreciseEqual(cfunc(arr), expected) arr = np.int16([1, 0, -1, 0]) check_arr(arr) arr = np.bool_([1, 0, 1]) check_arr(arr) arr = fac(24) check_arr(arr) check_arr(arr.reshape((3, 8))) check_arr(arr.reshape((3, 8)).T) check_arr(arr.reshape((3, 8))[::2]) check_arr(arr.reshape((2, 3, 4))) check_arr(arr.reshape((2, 3, 4)).T) check_arr(arr.reshape((2, 3, 4))[::2]) arr = np.array(["Hello", "", "world"]) check_arr(arr) for v in (0.0, 1.5, float('nan')): arr = np.array([v]).reshape(()) if numpy_version < (2, 1): check_arr(arr) else: with self.assertRaises((ValueError, TypingError)) as raises: njit((typeof(arr),))(pyfunc) msg = "Calling nonzero on 0d arrays is not allowed. Use " \ "np.atleast_1d(scalar).nonzero() instead." self.assertIn(msg, str(raises.exception)) def test_array_nonzero(self): self.check_nonzero(array_nonzero) def test_np_nonzero(self): self.check_nonzero(np_nonzero) def test_np_where_1(self): self.check_nonzero(np_where_1) def test_np_where_3(self): pyfunc = np_where_3 def fac(N): np.random.seed(42) arr = np.random.random(N) arr[arr < 0.3] = 0.0 arr[arr > 0.7] = float('nan') return arr layouts = cycle(['C', 'F', 'A']) _types = [np.int32, np.int64, np.float32, np.float64, np.complex64, np.complex128] np.random.seed(42) def check_arr(arr, layout=False): np.random.shuffle(_types) if layout != False: x = np.zeros_like(arr, dtype=_types[0], order=layout) y = np.zeros_like(arr, dtype=_types[1], order=layout) arr = arr.copy(order=layout) else: x = np.zeros_like(arr, dtype=_types[0], order=next(layouts)) y = np.zeros_like(arr, dtype=_types[1], order=next(layouts)) x.fill(4) y.fill(9) cfunc = njit((typeof(arr), typeof(x), typeof(y)))(pyfunc) expected = pyfunc(arr, x, y) got = cfunc(arr, x, y) self.assertPreciseEqual(got, expected) def check_scal(scal): x = 4 y = 5 np.random.shuffle(_types) x = _types[0](4) y = _types[1](5) cfunc = njit((typeof(scal), typeof(x), typeof(y)))(pyfunc) expected = pyfunc(scal, x, y) got = cfunc(scal, x, y) self.assertPreciseEqual(got, expected) arr = np.int16([1, 0, -1, 0]) check_arr(arr) arr = np.bool_([1, 0, 1]) check_arr(arr) arr = fac(24) check_arr(arr) check_arr(arr.reshape((3, 8))) check_arr(arr.reshape((3, 8)).T) check_arr(arr.reshape((3, 8))[::2]) check_arr(arr.reshape((2, 3, 4))) check_arr(arr.reshape((2, 3, 4)).T) check_arr(arr.reshape((2, 3, 4))[::2]) check_arr(arr.reshape((2, 3, 4)), layout='F') check_arr(arr.reshape((2, 3, 4)).T, layout='F') check_arr(arr.reshape((2, 3, 4))[::2], layout='F') for v in (0.0, 1.5, float('nan')): arr = np.array([v]).reshape(()) check_arr(arr) for x in (0, 1, True, False, 2.5, 0j): check_scal(x) def test_np_where_3_broadcast_x_y_scalar(self): pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) def check_ok(args): expected = pyfunc(*args) got = cfunc(*args) self.assertPreciseEqual(got, expected) def a_variations(): a = np.linspace(-2, 4, 20) self.random.shuffle(a) yield a yield a.reshape(2, 5, 2) yield 
a.reshape(4, 5, order='F') yield a.reshape(2, 5, 2)[::-1] for a in a_variations(): params = (a > 0, 0, 1) check_ok(params) params = (a < 0, np.nan, 1 + 4j) check_ok(params) params = (a > 1, True, False) check_ok(params) def test_np_where_3_broadcast_x_or_y_scalar(self): pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) def check_ok(args): condition, x, y = args expected = pyfunc(condition, x, y) got = cfunc(condition, x, y) self.assertPreciseEqual(got, expected) # swap x and y expected = pyfunc(condition, y, x) got = cfunc(condition, y, x) self.assertPreciseEqual(got, expected) def array_permutations(): x = np.arange(9).reshape(3, 3) yield x yield x * 1.1 yield np.asfortranarray(x) yield x[::-1] yield np.linspace(-10, 10, 60).reshape(3, 4, 5) * 1j def scalar_permutations(): yield 0 yield 4.3 yield np.nan yield True yield 8 + 4j for x in array_permutations(): for y in scalar_permutations(): x_mean = np.mean(x) condition = x > x_mean params = (condition, x, y) check_ok(params) def test_np_where_numpy_basic(self): # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8670-L8694 pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) # skipping unsupported dtypes: # np.longdouble, np.clongdouble dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128] for dt in dts: c = np.ones(53, dtype=bool) np.testing.assert_equal(cfunc( c, dt(0), dt(1)), dt(0)) np.testing.assert_equal(cfunc(~c, dt(0), dt(1)), dt(1)) np.testing.assert_equal(cfunc(True, dt(0), dt(1)), dt(0)) np.testing.assert_equal(cfunc(False, dt(0), dt(1)), dt(1)) d = np.ones_like(c).astype(dt) e = np.zeros_like(d) r = d.astype(dt) c[7] = False r[7] = e[7] np.testing.assert_equal(cfunc(c, e, e), e) np.testing.assert_equal(cfunc(c, d, e), r) np.testing.assert_equal(cfunc(c, d, e[0]), r) np.testing.assert_equal(cfunc(c, d[0], e), r) np.testing.assert_equal(cfunc(c[::2], d[::2], e[::2]), r[::2]) np.testing.assert_equal(cfunc(c[1::2], d[1::2], e[1::2]), r[1::2]) np.testing.assert_equal(cfunc(c[::3], d[::3], e[::3]), r[::3]) np.testing.assert_equal(cfunc(c[1::3], d[1::3], e[1::3]), r[1::3]) np.testing.assert_equal(cfunc(c[::-2], d[::-2], e[::-2]), r[::-2]) np.testing.assert_equal(cfunc(c[::-3], d[::-3], e[::-3]), r[::-3]) np.testing.assert_equal(cfunc(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) def test_np_where_numpy_ndim(self): # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8737-L8749 pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) r = cfunc(np.array(c)[:,np.newaxis], a, b) np.testing.assert_array_equal(r[0], a[0]) np.testing.assert_array_equal(r[1], b[0]) a = a.T b = b.T r = cfunc(c, a, b) np.testing.assert_array_equal(r[:,0], a[:,0]) np.testing.assert_array_equal(r[:,1], b[:,0]) def test_np_where_numpy_dtype_mix(self): # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8751-L8773 pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) a = np.uint32(1) b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) np.testing.assert_equal(cfunc(c, a, b), r) a = a.astype(np.float32) b = b.astype(np.int64) np.testing.assert_equal(cfunc(c, a, b), r) # non bool mask c = 
c.astype(int) c[c != 0] = 34242324 np.testing.assert_equal(cfunc(c, a, b), r) # invert tmpmask = c != 0 c[c == 0] = 41247212 c[tmpmask] = 0 np.testing.assert_equal(cfunc(c, b, a), r) def test_np_where_numpy_test_error(self): # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8794-L8799 pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) c = [True, True] a = np.ones((4, 5)) b = np.ones((5, 5)) self.disable_leak_check() with self.assertRaisesRegex(ValueError, "objects cannot be broadcast"): cfunc(c, a, b) with self.assertRaisesRegex(ValueError, "objects cannot be broadcast"): cfunc(c[0], a, b) def test_np_where_invalid_inputs(self): pyfunc = np_where_3 cfunc = jit(nopython=True)(pyfunc) msg = 'The argument "condition" must be array-like' with self.assertRaisesRegex(TypingError, msg): cfunc(None, 2, 3) msg = 'The argument "x" must be array-like if provided' with self.assertRaisesRegex(TypingError, msg): cfunc(1, 'hello', 3) msg = 'The argument "y" must be array-like if provided' with self.assertRaisesRegex(TypingError, msg): cfunc(1, 2, 'world') # None values are not yet supported in np.where msg = 'Argument "x" or "y" cannot be None' with self.assertRaisesRegex(TypingError, msg): cfunc(1, None, None) def test_arange_1_arg(self): all_pyfuncs = ( np_arange_1, lambda x: np.arange(x, 10), lambda x: np.arange(7, step=max(1, abs(x))) ) for pyfunc in all_pyfuncs: cfunc = jit(nopython=True)(pyfunc) def check_ok(arg0): expected = pyfunc(arg0) got = cfunc(arg0) np.testing.assert_allclose(expected, got) check_ok(0) check_ok(1) check_ok(4) check_ok(5.5) check_ok(-3) check_ok(complex(4, 4)) check_ok(np.int8(0)) def test_arange_2_arg(self): def check_ok(arg0, arg1, pyfunc, cfunc): expected = pyfunc(arg0, arg1) got = cfunc(arg0, arg1) np.testing.assert_allclose(expected, got) all_pyfuncs = ( np_arange_2, np_arange_start_stop, np_arange_1_stop, np_arange_1_step, lambda x, y: np.arange(x, y, 5), lambda x, y: np.arange(2, y, step=x), ) for pyfunc in all_pyfuncs: cfunc = jit(nopython=True)(pyfunc) check_ok(-1, 5, pyfunc, cfunc) check_ok(-8, -1, pyfunc, cfunc) check_ok(4, 0.5, pyfunc, cfunc) check_ok(0.5, 4, pyfunc, cfunc) check_ok(3, None, pyfunc, cfunc) if numpy_version < (2, 0): check_ok(complex(1, 1), complex(4, 4), pyfunc, cfunc) check_ok(complex(4, 4), complex(1, 1), pyfunc, cfunc) pyfunc = np_arange_1_dtype cfunc = jit(nopython=True)(pyfunc) check_ok(5, np.float32, pyfunc, cfunc) check_ok(2.0, np.int32, pyfunc, cfunc) check_ok(7, None, pyfunc, cfunc) check_ok(np.int8(0), None, pyfunc, cfunc) if numpy_version < (2, 0): check_ok(10, np.complex128, pyfunc, cfunc) check_ok(np.complex64(10), np.complex128, pyfunc, cfunc) def test_arange_3_arg(self): windows64 = sys.platform.startswith('win32') and sys.maxsize > 2 ** 32 def check_ok(arg0, arg1, arg2, pyfunc, cfunc, check_dtype=False): expected = pyfunc(arg0, arg1, arg2) got = cfunc(arg0, arg1, arg2) np.testing.assert_allclose(expected, got) # windows 64 cannot differentiate between a python int and a # np.int64 which means the result from numba is int64 more often # than in NumPy. 
if not windows64: self.assertEqual(expected.dtype, got.dtype) for pyfunc in (np_arange_3, np_arange_2_step, np_arange_start_stop_step): cfunc = jit(nopython=True)(pyfunc) check_ok(0, 5, 1, pyfunc, cfunc) check_ok(-8, -1, 3, pyfunc, cfunc) check_ok(0, -10, -2, pyfunc, cfunc) check_ok(0.5, 4, 2, pyfunc, cfunc) check_ok(0, 1, 0.1, pyfunc, cfunc) check_ok(3, 6, None, pyfunc, cfunc) check_ok(3, None, None, pyfunc, cfunc) check_ok(np.int8(0), np.int8(5), np.int8(1), pyfunc, cfunc) check_ok(np.int8(0), np.int16(5), np.int32(1), pyfunc, cfunc) # check upcasting logic, this matters most on windows i8 = np.int8 check_ok(i8(0), i8(5), i8(1), pyfunc, cfunc, True) # C int check_ok(np.int64(0), i8(5), i8(1), pyfunc, cfunc, True) # int64 if numpy_version < (2, 0): check_ok(0, complex(4, 4), complex(1, 1), pyfunc, cfunc) pyfunc = np_arange_2_dtype cfunc = jit(nopython=True)(pyfunc) check_ok(1, 5, np.float32, pyfunc, cfunc) check_ok(2.0, 8, np.int32, pyfunc, cfunc) check_ok(1, 7, None, pyfunc, cfunc) check_ok(np.int8(0), np.int32(5), None, pyfunc, cfunc, True) if numpy_version < (2, 0): check_ok(-2, 10, np.complex128, pyfunc, cfunc) check_ok(3, np.complex64(10), np.complex128, pyfunc, cfunc) def test_arange_4_arg(self): for pyfunc in (np_arange_4, np_arange_start_stop_step_dtype): cfunc = jit(nopython=True)(pyfunc) def check_ok(arg0, arg1, arg2, arg3): expected = pyfunc(arg0, arg1, arg2, arg3) got = cfunc(arg0, arg1, arg2, arg3) np.testing.assert_allclose(expected, got) check_ok(0, 5, 1, np.float64) check_ok(-8, -1, 3, np.int32) check_ok(0, -10, -2, np.float32) check_ok(0.5, 4, 2, None) check_ok(3, 6, None, None) check_ok(3, None, None, None) if numpy_version < (2, 0): check_ok(0, 1, 0.1, np.complex128) check_ok(0, complex(4, 4), complex(1, 1), np.complex128) def test_arange_throws(self): # Exceptions leak references self.disable_leak_check() bad_funcs_1 = [ lambda x: np.arange(stop=x), lambda x: np.arange(step=x), lambda x: np.arange(dtype=x), ] bad_funcs_2 = [ lambda x, y: np.arange(stop=x, step=y), lambda x, y: np.arange(stop=x, dtype=y), ] for pyfunc in bad_funcs_1: with self.assertRaises(TypingError) as raises: cfunc = jit(nopython=True)(pyfunc) cfunc(2) for pyfunc in bad_funcs_2: with self.assertRaises(TypingError) as raises: cfunc = jit(nopython=True)(pyfunc) cfunc(2, 6) # check step size = 0, this is nonsense pyfunc = np_arange_3 cfunc = jit(nopython=True)(pyfunc) for f in (pyfunc, cfunc,): for inputs in [(1, np.int16(2), 0), (1, 2, 0)]: # there's a different error depending on whether any of the # input values are np scalars permitted_errors = (ZeroDivisionError, ValueError) with self.assertRaises(permitted_errors) as raises: # this will raise RuntimeWarning's about zero division with warnings.catch_warnings(): warnings.simplefilter("ignore") f(*inputs) self.assertIn("Maximum allowed size exceeded", str(raises.exception)) def test_arange_accuracy(self): # Checking arange reasonably replicates NumPy's algorithm # see https://github.com/numba/numba/issues/6768 @jit(nopython=True) def foo(step): return np.arange(0, 1 + step, step) x = 0.010101010101010102 self.assertPreciseEqual(foo(x), foo.py_func(x)) def test_item(self): pyfunc = array_item cfunc = jit(nopython=True)(pyfunc) def check_ok(arg): expected = pyfunc(arg) got = cfunc(arg) self.assertPreciseEqual(got, expected) def check_err(arg): with self.assertRaises(ValueError) as raises: cfunc(arg) self.assertIn("item(): can only convert an array of size 1 to a Python scalar", str(raises.exception)) # Exceptions leak references 
self.disable_leak_check() # Test on different kinds of scalars and 1-item arrays check_ok(np.float32([1.5])) check_ok(np.complex128([[1.5j]])) check_ok(np.array(1.5)) check_ok(np.bool_(True)) check_ok(np.float32(1.5)) check_err(np.array([1, 2])) check_err(np.array([])) @skip_if_numpy_2 def test_itemset(self): pyfunc = array_itemset cfunc = jit(nopython=True)(pyfunc) def check_ok(a, v): expected = a.copy() got = a.copy() pyfunc(expected, v) cfunc(got, v) self.assertPreciseEqual(got, expected) def check_err(a): with self.assertRaises(ValueError) as raises: cfunc(a, 42) self.assertIn("itemset(): can only write to an array of size 1", str(raises.exception)) # Exceptions leak references self.disable_leak_check() # Test on different kinds of 1-item arrays check_ok(np.float32([1.5]), 42) check_ok(np.complex128([[1.5j]]), 42) check_ok(np.array(1.5), 42) check_err(np.array([1, 2])) check_err(np.array([])) def test_sum(self): """ test sum over a whole range of dtypes, no axis or dtype parameter """ pyfunc = array_sum cfunc = jit(nopython=True)(pyfunc) all_dtypes = [np.float64, np.float32, np.int64, np.int32, np.complex64, np.complex128, np.timedelta64] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype), np.ones((7, 3), arr_dtype) * -5] for arr_dtype in all_dtypes] unsigned_dtypes = [np.uint32, np.uint64, np.bool_] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype)] for arr_dtype in unsigned_dtypes] for arr_list in all_test_arrays: for arr in arr_list: with self.subTest("Test np.sum with {} input ".format(arr.dtype)): self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) def test_sum_axis_kws1(self): """ test sum with axis parameter over a whole range of dtypes """ pyfunc = array_sum_axis_kws cfunc = jit(nopython=True)(pyfunc) all_dtypes = [np.float64, np.float32, np.int64, np.complex64, np.complex128, TIMEDELTA_M] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype), np.ones((7, 3), arr_dtype) * -5] for arr_dtype in all_dtypes] unsigned_dtypes = [np.uint64, np.bool_] all_test_arrays += [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype)] for arr_dtype in unsigned_dtypes] for arr_list in all_test_arrays: for arr in arr_list: for axis in (0, 1, 2): if axis > len(arr.shape)-1: continue with self.subTest("Testing np.sum(axis) with {} " "input ".format(arr.dtype)): self.assertPreciseEqual(pyfunc(arr, axis=axis), cfunc(arr, axis=axis)) def test_sum_axis_kws2(self): """ testing uint32 and int32 separately uint32 and int32 must be tested separately because Numpy's current behaviour is different in 64bits Windows (accumulates as int32) and 64bits Linux (accumulates as int64), while Numba has decided to always accumulate as int64, when the OS is 64bits. No testing has been done for behaviours in 32 bits platforms. 
""" pyfunc = array_sum_axis_kws cfunc = jit(nopython=True)(pyfunc) all_dtypes = [np.int32] # expected return dtypes in Numba out_dtypes = {np.dtype('int32'): np.int64, np.dtype('uint32'): np.uint64, np.dtype('int64'): np.int64, np.dtype(TIMEDELTA_M): np.dtype(TIMEDELTA_M)} all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype), np.ones((7, 3), arr_dtype) * -5] for arr_dtype in all_dtypes] unsigned_dtypes = [np.uint32] all_test_arrays += [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype)] for arr_dtype in unsigned_dtypes] for arr_list in all_test_arrays: for arr in arr_list: for axis in (0, 1, 2): if axis > len(arr.shape)-1: continue with self.subTest("Testing np.sum(axis) with {} " "input ".format(arr.dtype)): npy_res = pyfunc(arr, axis=axis) numba_res = cfunc(arr, axis=axis) if isinstance(numba_res, np.ndarray): self.assertPreciseEqual( npy_res.astype(out_dtypes[arr.dtype]), numba_res.astype(out_dtypes[arr.dtype])) else: # the results are scalars self.assertEqual(npy_res, numba_res) def test_sum_dtype_kws(self): """ test sum with dtype parameter over a whole range of dtypes """ pyfunc = array_sum_dtype_kws cfunc = jit(nopython=True)(pyfunc) all_dtypes = [np.float64, np.float32, np.int64, np.int32, np.complex64, np.complex128] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype), np.ones((7, 3), arr_dtype) * -5] for arr_dtype in all_dtypes] unsigned_dtypes = [np.uint32, np.uint64, np.bool_] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype)] for arr_dtype in unsigned_dtypes] out_dtypes = {np.dtype('float64'): [np.float64], np.dtype('float32'): [np.float64, np.float32], np.dtype('int64'): [np.float64, np.int64, np.float32], np.dtype('int32'): [np.float64, np.int64, np.float32, np.int32], np.dtype('uint32'): [np.float64, np.int64, np.float32], np.dtype('uint64'): [np.float64, np.int64], np.dtype('bool'): [np.float64, np.int64, np.float32, np.int32, np.bool_], np.dtype('complex64'): [np.complex64, np.complex128], np.dtype('complex128'): [np.complex128]} for arr_list in all_test_arrays: for arr in arr_list: for out_dtype in out_dtypes[arr.dtype]: subtest_str = ("Testing np.sum with {} input and {} output" .format(arr.dtype, out_dtype)) with self.subTest(subtest_str): self.assertPreciseEqual(pyfunc(arr, dtype=out_dtype), cfunc(arr, dtype=out_dtype)) def test_sum_axis_dtype_kws(self): """ test sum with axis and dtype parameters over a whole range of dtypes """ pyfunc = array_sum_axis_dtype_kws cfunc = jit(nopython=True)(pyfunc) all_dtypes = [np.float64, np.float32, np.int64, np.int32, np.complex64, np.complex128] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype), np.ones((7, 3), arr_dtype) * -5] for arr_dtype in all_dtypes] unsigned_dtypes = [np.uint32, np.uint64, np.bool_] all_test_arrays = [ [np.ones((7, 6, 5, 4, 3), arr_dtype), np.ones(1, arr_dtype)] for arr_dtype in unsigned_dtypes] out_dtypes = {np.dtype('float64'): [np.float64], np.dtype('float32'): [np.float64, np.float32], np.dtype('int64'): [np.float64, np.int64, np.float32], np.dtype('int32'): [np.float64, np.int64, np.float32, np.int32], np.dtype('uint32'): [np.float64, np.int64, np.float32], np.dtype('uint64'): [np.float64, np.uint64], np.dtype('bool'): [np.float64, np.int64, np.float32, np.int32, np.bool_], np.dtype('complex64'): [np.complex64, np.complex128], np.dtype('complex128'): [np.complex128]} for arr_list in all_test_arrays: for arr in arr_list: for out_dtype in out_dtypes[arr.dtype]: for axis in (0, 1, 
2): if axis > len(arr.shape) - 1: continue subtest_str = ("Testing np.sum with {} input and {} output " .format(arr.dtype, out_dtype)) with self.subTest(subtest_str): py_res = pyfunc(arr, axis=axis, dtype=out_dtype) nb_res = cfunc(arr, axis=axis, dtype=out_dtype) self.assertPreciseEqual(py_res, nb_res) def test_sum_axis_dtype_pos_arg(self): """ testing that axis and dtype inputs work when passed as positional """ pyfunc = array_sum_axis_dtype_pos cfunc = jit(nopython=True)(pyfunc) dtype = np.float64 # OK a = np.ones((7, 6, 5, 4, 3)) self.assertPreciseEqual(pyfunc(a, 1, dtype), cfunc(a, 1, dtype)) self.assertPreciseEqual(pyfunc(a, 2, dtype), cfunc(a, 2, dtype)) def test_sum_1d_kws(self): # check 1d reduces to scalar pyfunc = array_sum_axis_kws cfunc = jit(nopython=True)(pyfunc) a = np.arange(10.) self.assertPreciseEqual(pyfunc(a, axis=0), cfunc(a, axis=0)) pyfunc = array_sum_const_axis_neg_one cfunc = jit(nopython=True)(pyfunc) a = np.arange(10.) self.assertPreciseEqual(pyfunc(a, axis=-1), cfunc(a, axis=-1)) def test_sum_const(self): pyfunc = array_sum_const_multi cfunc = jit(nopython=True)(pyfunc) arr = np.ones((3, 4, 5, 6, 7, 8)) axis = 1 self.assertPreciseEqual(pyfunc(arr, axis), cfunc(arr, axis)) axis = 2 self.assertPreciseEqual(pyfunc(arr, axis), cfunc(arr, axis)) def test_sum_exceptions(self): # Exceptions leak references self.disable_leak_check() pyfunc = array_sum cfunc = jit(nopython=True)(pyfunc) a = np.ones((7, 6, 5, 4, 3)) b = np.ones((4, 3)) # BAD: axis > dimensions with self.assertRaises(ValueError): cfunc(b, 2) # BAD: negative axis with self.assertRaises(ValueError): cfunc(a, -1) # BAD: axis greater than 3 with self.assertRaises(ValueError): cfunc(a, 4) def test_sum_const_negative(self): # Exceptions leak references self.disable_leak_check() @jit(nopython=True) def foo(arr): return arr.sum(axis=-3) # ndim == 4, axis == -3, OK a = np.ones((1, 2, 3, 4)) self.assertPreciseEqual(foo(a), foo.py_func(a)) # ndim == 3, axis == -3, OK a = np.ones((1, 2, 3)) self.assertPreciseEqual(foo(a), foo.py_func(a)) # ndim == 2, axis == -3, BAD a = np.ones((1, 2)) with self.assertRaises(NumbaValueError) as raises: foo(a) errmsg = "'axis' entry (-1) is out of bounds" self.assertIn(errmsg, str(raises.exception)) with self.assertRaises(ValueError) as raises: foo.py_func(a) self.assertIn("out of bounds", str(raises.exception)) def test_cumsum(self): pyfunc = array_cumsum cfunc = jit(nopython=True)(pyfunc) # OK a = np.ones((2, 3)) self.assertPreciseEqual(pyfunc(a), cfunc(a)) # BAD: with axis with self.assertRaises(TypingError): cfunc(a, 1) # BAD: with kw axis pyfunc = array_cumsum_kws cfunc = jit(nopython=True)(pyfunc) with self.assertRaises(TypingError): cfunc(a, axis=1) def test_take(self): pyfunc = array_take cfunc = jit(nopython=True)(pyfunc) def check(arr, ind): expected = pyfunc(arr, ind) got = cfunc(arr, ind) self.assertPreciseEqual(expected, got) if hasattr(expected, 'order'): self.assertEqual(expected.order == got.order) # need to check: # 1. scalar index # 2. 1d array index # 3. nd array index, >2d and F order # 4. reflected list # 5. 
tuples test_indices = [] test_indices.append(1) test_indices.append(5) test_indices.append(11) test_indices.append(-2) test_indices.append(np.array([1, 5, 1, 11, 3])) test_indices.append(np.array([[1, 5, 1], [11, 3, 0]], order='F')) test_indices.append(np.array([[[1, 5, 1], [11, 3, 0]]])) test_indices.append(np.array([[[[1, 5]], [[11, 0]], [[1, 2]]]])) test_indices.append([1, 5, 1, 11, 3]) test_indices.append((1, 5, 1)) test_indices.append(((1, 5, 1), (11, 3, 2))) test_indices.append((((1,), (5,), (1,)), ((11,), (3,), (2,)))) layouts = cycle(['C', 'F', 'A']) for dt in [np.float64, np.int64, np.complex128]: A = np.arange(12, dtype=dt).reshape((4, 3), order=next(layouts)) for ind in test_indices: check(A, ind) #check illegal access raises A = np.arange(12, dtype=dt).reshape((4, 3), order=next(layouts)) szA = A.size illegal_indices = [szA, -szA - 1, np.array(szA), np.array(-szA - 1), [szA], [-szA - 1]] for x in illegal_indices: with self.assertRaises(IndexError): cfunc(A, x) # oob raises # check float indexing raises with self.assertRaises(TypingError): cfunc(A, [1.7]) #exceptions leak refs self.disable_leak_check() def test_fill(self): pyfunc = array_fill cfunc = jit(nopython=True)(pyfunc) def check(arr, val): expected = np.copy(arr) erv = pyfunc(expected, val) self.assertTrue(erv is None) got = np.copy(arr) grv = cfunc(got, val) self.assertTrue(grv is None) # check mutation is the same self.assertPreciseEqual(expected, got) # scalar A = np.arange(1) for x in [np.float64, np.bool_]: check(A, x(10)) # 2d A = np.arange(12).reshape(3, 4) for x in [np.float64, np.bool_]: check(A, x(10)) # 4d A = np.arange(48, dtype=np.complex64).reshape(2, 3, 4, 2) for x in [np.float64, np.complex128, np.bool_]: check(A, x(10)) def test_real(self): pyfunc = array_real cfunc = jit(nopython=True)(pyfunc) x = np.linspace(-10, 10) np.testing.assert_equal(pyfunc(x), cfunc(x)) x, y = np.meshgrid(x, x) z = x + 1j*y np.testing.assert_equal(pyfunc(z), cfunc(z)) def test_imag(self): pyfunc = array_imag cfunc = jit(nopython=True)(pyfunc) x = np.linspace(-10, 10) np.testing.assert_equal(pyfunc(x), cfunc(x)) x, y = np.meshgrid(x, x) z = x + 1j*y np.testing.assert_equal(pyfunc(z), cfunc(z)) def _lower_clip_result_test_util(self, func, a, a_min, a_max): # verifies that type-inference is working on the return value # this used to trigger issue #3489 def lower_clip_result(a): return np.expm1(func(a, a_min, a_max)) np.testing.assert_almost_equal( lower_clip_result(a), jit(nopython=True)(lower_clip_result)(a)) def test_clip(self): has_out = (np_clip, np_clip_kwargs, array_clip, array_clip_kwargs) has_no_out = (np_clip_no_out, array_clip_no_out) # TODO: scalars are not tested (issue #3469) for a in (np.linspace(-10, 10, 101), np.linspace(-10, 10, 40).reshape(5, 2, 4)): for pyfunc in has_out + has_no_out: cfunc = jit(nopython=True)(pyfunc) msg = "array_clip: must set either max or min" with self.assertRaisesRegex(ValueError, msg): cfunc(a, None, None) np.testing.assert_equal(pyfunc(a, 0, None), cfunc(a, 0, None)) np.testing.assert_equal(pyfunc(a, None, 0), cfunc(a, None, 0)) np.testing.assert_equal(pyfunc(a, -5, 5), cfunc(a, -5, 5)) if pyfunc in has_out: pyout = np.empty_like(a) cout = np.empty_like(a) np.testing.assert_equal(pyfunc(a, -5, 5, pyout), cfunc(a, -5, 5, cout)) np.testing.assert_equal(pyout, cout) self._lower_clip_result_test_util(cfunc, a, -5, 5) def test_clip_array_min_max(self): has_out = (np_clip, np_clip_kwargs, array_clip, array_clip_kwargs) has_no_out = (np_clip_no_out, array_clip_no_out) # TODO: scalars are not 
tested (issue #3469) a = np.linspace(-10, 10, 40).reshape(5, 2, 4) a_min_arr = np.arange(-8, 0).astype(a.dtype).reshape(2, 4) a_max_arr = np.arange(0, 8).astype(a.dtype).reshape(2, 4) mins = [0, -5, a_min_arr, None] maxs = [0, 5, a_max_arr, None] for pyfunc in has_out + has_no_out: cfunc = jit(nopython=True)(pyfunc) for a_min in mins: for a_max in maxs: if a_min is None and a_max is None: msg = "array_clip: must set either max or min" with self.assertRaisesRegex(ValueError, msg): cfunc(a, None, None) continue np.testing.assert_equal(pyfunc(a, a_min, a_max), cfunc(a, a_min, a_max)) if pyfunc in has_out: pyout = np.empty_like(a) cout = np.empty_like(a) np.testing.assert_equal(pyfunc(a, a_min, a_max, pyout), cfunc(a, a_min, a_max, cout)) np.testing.assert_equal(pyout, cout) self._lower_clip_result_test_util(cfunc, a, a_min, a_max) def test_clip_bad_array(self): cfunc = jit(nopython=True)(np_clip) msg = '.*The argument "a" must be array-like.*' with self.assertRaisesRegex(TypingError, msg): cfunc(None, 0, 10) def test_clip_bad_min(self): cfunc = jit(nopython=True)(np_clip) msg = '.*The argument "a_min" must be a number.*' with self.assertRaisesRegex(TypingError, msg): cfunc(1, 'a', 10) def test_clip_bad_max(self): cfunc = jit(nopython=True)(np_clip) msg = '.*The argument "a_max" must be a number.*' with self.assertRaisesRegex(TypingError, msg): cfunc(1, 1, 'b') def test_clip_bad_out(self): cfunc = jit(nopython=True)(np_clip) msg = '.*The argument "out" must be an array if it is provided.*' with self.assertRaisesRegex(TypingError, msg): cfunc(5, 1, 10, out=6) def test_clip_no_broadcast(self): self.disable_leak_check() cfunc = jit(nopython=True)(np_clip) msg = ".*shape mismatch: objects cannot be broadcast to a single shape.*" a = np.linspace(-10, 10, 40).reshape(5, 2, 4) a_min_arr = np.arange(-5, 0).astype(a.dtype).reshape(5, 1) a_max_arr = np.arange(0, 5).astype(a.dtype).reshape(5, 1) min_max = [(0, a_max_arr), (-5, a_max_arr), (a_min_arr, a_max_arr), (a_min_arr, 0), (a_min_arr, 5)] for a_min, a_max in min_max: with self.assertRaisesRegex(ValueError, msg): cfunc(a, a_min, a_max) def test_conj(self): for pyfunc in [array_conj, array_conjugate]: cfunc = jit(nopython=True)(pyfunc) x = np.linspace(-10, 10) np.testing.assert_equal(pyfunc(x), cfunc(x)) x, y = np.meshgrid(x, x) z = x + 1j*y np.testing.assert_equal(pyfunc(z), cfunc(z)) def test_unique(self): pyfunc = np_unique cfunc = jit(nopython=True)(pyfunc) def check(a): np.testing.assert_equal(pyfunc(a), cfunc(a)) check(np.array([[1, 1, 3], [3, 4, 5]])) check(np.array(np.zeros(5))) check(np.array([[3.1, 3.1], [1.7, 2.29], [3.3, 1.7]])) check(np.array([])) check(np.array([np.nan, np.nan])) check(np.array(['A', 'A', 'B'], dtype='<U16')) # issue 10250 check(np.array([np.datetime64("2001-01-01"), np.datetime64("2001-01-01"), np.datetime64("2001-01-02"), np.datetime64("NAT")])) @needs_blas def test_array_dot(self): # just ensure that the dot impl dispatches correctly, do # not test dot itself, this is done in test_linalg. pyfunc = array_dot cfunc = jit(nopython=True)(pyfunc) a = np.arange(20.).reshape(4, 5) b = np.arange(5.) np.testing.assert_equal(pyfunc(a, b), cfunc(a, b)) # check that chaining works pyfunc = array_dot_chain cfunc = jit(nopython=True)(pyfunc) a = np.arange(16.).reshape(4, 4) np.testing.assert_equal(pyfunc(a, a), cfunc(a, a)) def test_array_ctor_with_dtype_arg(self): # Test using np.dtype and np.generic (i.e. 
np.dtype.type) has args pyfunc = array_ctor cfunc = jit(nopython=True)(pyfunc) n = 2 args = n, np.int32 np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) args = n, np.dtype('int32') np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) args = n, np.float32 np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) args = n, np.dtype('f4') np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) def test_frombuffer_offset(self): # Expect to skip the first two elements (offset = 2 bytes) buffer = np.arange(8, dtype=np.uint8) offset = 2 result = np_frombuffer(buffer, dtype=buffer.dtype, offset=offset) expected = np.array([2, 3, 4, 5, 6, 7], dtype=buffer.dtype) np.testing.assert_array_equal(result, expected) def test_frombuffer_count(self): # Expect to read only 4 elements buffer = np.arange(24, dtype=np.uint8) count = 4 result = np_frombuffer(buffer, dtype=buffer.dtype, count=count) expected = np.array([0, 1, 2, 3], dtype=buffer.dtype) np.testing.assert_array_equal(result, expected) def test_frombuffer_count_negative_means_all(self): # Expect to read only 4 elements buffer = np.arange(8, dtype=np.uint8) result = np_frombuffer(buffer, dtype=buffer.dtype, count=-1) expected = np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=buffer.dtype) np.testing.assert_array_equal(result, expected) def test_frombuffer_offset_and_count(self): # Skip first 2 bytes and read 3 elements buffer = np.arange(24, dtype=np.uint8) offset = 2 count = 3 result = np_frombuffer(buffer, dtype=buffer.dtype, offset=offset, count=count) expected = np.array([2, 3, 4], dtype=buffer.dtype) np.testing.assert_array_equal(result, expected) def test_frombuffer_invalid_offset(self): # Test behavior when offset exceeds buffer size buffer = np.arange(24, dtype=np.uint8) offset = len(buffer) + 1 # Invalid offset msg = "offset must be non-negative and no greater than buffer length" with self.assertRaisesRegex(ValueError, msg): np_frombuffer(buffer, dtype=buffer.dtype, offset=offset) def test_frombuffer_invalid_count(self): # Test behavior when count exceeds the possible number of elements buffer = np.arange(24, dtype=np.uint8) count = len(buffer) + 1 # Count exceeds buffer size msg = "buffer is smaller than requested size" with self.assertRaisesRegex(ValueError, msg): np.frombuffer(buffer, dtype=buffer.dtype, count=count)
TestArrayMethods
python
getsentry__sentry
tests/sentry/issues/test_ingest.py
{ "start": 36904, "end": 40512 }
class ____(OccurrenceTestMixin, TestCase):
    def test_simple(self) -> None:
        occurrence = self.build_occurrence()
        event = self.store_event(data={}, project_id=self.project.id)
        assert materialize_metadata(occurrence, event) == {
            "type": "default",
            "culprit": occurrence.culprit,
            "metadata": {
                "title": occurrence.issue_title,
                "value": occurrence.subtitle,
                "initial_priority": occurrence.priority,
            },
            "title": occurrence.issue_title,
            "location": event.location,
            "last_received": json.datetime_to_str(event.datetime),
        }

    def test_preserves_existing_metadata(self) -> None:
        occurrence = self.build_occurrence()
        event = self.store_event(data={}, project_id=self.project.id)
        event.data.setdefault("metadata", {})
        event.data["metadata"]["dogs"] = "are great"  # should not get clobbered

        materialized = materialize_metadata(occurrence, event)
        assert materialized["metadata"] == {
            "title": occurrence.issue_title,
            "value": occurrence.subtitle,
            "dogs": "are great",
            "initial_priority": occurrence.priority,
        }

    def test_populates_feedback_metadata(self) -> None:
        occurrence = self.build_occurrence(
            type=FeedbackGroup.type_id,
            evidence_data={
                "contact_email": "test@test.com",
                "message": "test",
                "name": "Name Test",
                "source": "crash report widget",
                "summary": "test",
            },
        )
        event = self.store_event(data={}, project_id=self.project.id)
        event.data.setdefault("metadata", {})
        event.data["metadata"]["dogs"] = "are great"  # should not get clobbered

        materialized = materialize_metadata(occurrence, event)
        assert materialized["metadata"] == {
            "title": occurrence.issue_title,
            "value": occurrence.subtitle,
            "dogs": "are great",
            "contact_email": "test@test.com",
            "message": "test",
            "name": "Name Test",
            "source": "crash report widget",
            "summary": "test",
            "initial_priority": occurrence.priority,
        }

    def test_populates_feedback_metadata_with_linked_error(self) -> None:
        occurrence = self.build_occurrence(
            type=FeedbackGroup.type_id,
            evidence_data={
                "contact_email": "test@test.com",
                "message": "test",
                "name": "Name Test",
                "source": "crash report widget",
                "summary": "test",
                "associated_event_id": "55798fee4d21425c8689c980cde794f2",
            },
        )
        event = self.store_event(data={}, project_id=self.project.id)
        event.data.setdefault("metadata", {})
        event.data["metadata"]["dogs"] = "are great"  # should not get clobbered

        materialized = materialize_metadata(occurrence, event)
        assert materialized["metadata"] == {
            "title": occurrence.issue_title,
            "value": occurrence.subtitle,
            "dogs": "are great",
            "contact_email": "test@test.com",
            "message": "test",
            "name": "Name Test",
            "source": "crash report widget",
            "summary": "test",
            "initial_priority": occurrence.priority,
            "associated_event_id": "55798fee4d21425c8689c980cde794f2",
        }
MaterializeMetadataTest
python
huggingface__transformers
src/transformers/models/esm/modeling_esmfold.py
{ "start": 49168, "end": 49700 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="relu")
        self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="final")
        self.relu = nn.ReLU()

    def forward(self, a: torch.Tensor) -> torch.Tensor:
        s_initial = a
        a = self.relu(a)
        a = self.linear_1(a)
        a = self.relu(a)
        a = self.linear_2(a)
        return a + s_initial
EsmFoldAngleResnetBlock
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 96887, "end": 97401 }
class ____(Expr):
    _parameters = ["obj"]

    def __str__(self):
        return f"{type(self).__name__}({self.obj})"

    @property
    def _name(self):
        return self.obj.key

    def _layer(self) -> dict:
        dc = self.obj.__dask_optimize__(self.obj.dask, self.obj.key).to_dict().copy()
        dc[(self.obj.key, 0)] = dc[self.obj.key]
        dc.pop(self.obj.key)
        return dc

    def _divisions(self):
        return (None, None)

    @property
    def ndim(self):
        return 0
_DelayedExpr
python
scipy__scipy
scipy/signal/tests/test_spectral.py
{ "start": 36628, "end": 58365 }
class ____: def test_frequency(self): """Test if frequency location of peak corresponds to frequency of generated input signal. """ # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram P = lombscargle(t, y, f) # Check if difference between found frequency maximum and input # frequency is less than accuracy delta = f[1] - f[0] assert(w - f[np.argmax(P)] < (delta/2.)) # also, check that it works with weights P = lombscargle(t, y, f, weights=np.ones_like(t, dtype=f.dtype)) # Check if difference between found frequency maximum and input # frequency is less than accuracy delta = f[1] - f[0] assert(w - f[np.argmax(P)] < (delta/2.)) def test_amplitude(self): # Test if height of peak in unnormalized Lomb-Scargle periodogram # corresponds to amplitude of the generated input signal. # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 1000 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f) # convert to the amplitude pgram = np.sqrt(4.0 * pgram / t.shape[0]) # Check if amplitude is correct (this will not exactly match, due to # numerical differences when data is removed) assert_allclose(pgram[f==w], ampl, rtol=5e-2) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_precenter(self): # Test if precenter gives the same result as manually precentering # (for a very simple offset) # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select offset = 0.15 # Offset to be subtracted in pre-centering # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) + offset # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f, precenter=True) pgram2 = lombscargle(t, y - y.mean(), f, precenter=False) # check if centering worked assert_allclose(pgram, pgram2) # do this again, but with floating_mean=True # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f, precenter=True, floating_mean=True) pgram2 = lombscargle(t, y - y.mean(), f, precenter=False, floating_mean=True) # check if centering worked assert_allclose(pgram, pgram2) def test_normalize(self): # Test normalize option of Lomb-Scarge. # Input parameters ampl = 2. w = 1. 
phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f) pgram2 = lombscargle(t, y, f, normalize=True) # Calculate the scale to convert from unnormalized to normalized weights = np.ones_like(t)/float(t.shape[0]) YY_hat = (weights * y * y).sum() YY = YY_hat # correct formula for floating_mean=False scale_to_use = 2/(YY*t.shape[0]) # check if normalization works as expected assert_allclose(pgram * scale_to_use, pgram2) assert_allclose(np.max(pgram2), 1.0) def test_wrong_shape(self): # different length t and y t = np.linspace(0, 1, 1) y = np.linspace(0, 1, 2) f = np.linspace(0, 1, 3) + 0.1 assert_raises(ValueError, lombscargle, t, y, f) # t is 2D, with both axes length > 1 t = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) y = np.linspace(0, 1, 2) f = np.linspace(0, 1, 3) + 0.1 assert_raises(ValueError, lombscargle, t, y, f) # y is 2D, with both axes length > 1 t = np.linspace(0, 1, 2) y = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) f = np.linspace(0, 1, 3) + 0.1 assert_raises(ValueError, lombscargle, t, y, f) # f is 2D, with both axes length > 1 t = np.linspace(0, 1, 2) y = np.linspace(0, 1, 2) f = np.repeat(np.expand_dims(np.linspace(0, 1, 3), 1) + 0.1, 2, axis=1) assert_raises(ValueError, lombscargle, t, y, f) # weights is 2D, with both axes length > 1 t = np.linspace(0, 1, 2) y = np.linspace(0, 1, 2) f = np.linspace(0, 1, 3) + 0.1 weights = np.repeat(np.expand_dims(np.linspace(0, 1, 2), 1), 2, axis=1) assert_raises(ValueError, lombscargle, t, y, f, weights=weights) def test_lombscargle_atan_vs_atan2(self): # https://github.com/scipy/scipy/issues/3787 # This raised a ZeroDivisionError. t = np.linspace(0, 10, 1000, endpoint=False) y = np.sin(4*t) f = np.linspace(0, 50, 500, endpoint=False) + 0.1 lombscargle(t, y, f*2*np.pi) def test_wrong_shape_weights(self): # Weights must be the same shape as t t = np.linspace(0, 1, 1) y = np.linspace(0, 1, 1) f = np.linspace(0, 1, 3) + 0.1 weights = np.linspace(1, 2, 2) assert_raises(ValueError, lombscargle, t, y, f, weights=weights) def test_zero_division_weights(self): # Weights cannot sum to 0 t = np.zeros(1) y = np.zeros(1) f = np.ones(1) weights = np.zeros(1) assert_raises(ValueError, lombscargle, t, y, f, weights=weights) def test_normalize_parameter(self): # Test the validity of the normalize parameter input # Input parameters ampl = 2. w = 1. 
phi = 0 nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # check each of the valid inputs pgram_false = lombscargle(t, y, f, normalize=False) pgram_true = lombscargle(t, y, f, normalize=True) pgram_power = lombscargle(t, y, f, normalize='power') pgram_norm = lombscargle(t, y, f, normalize='normalize') pgram_amp = lombscargle(t, y, f, normalize='amplitude') # validate the results that should be the same assert_allclose(pgram_false, pgram_power) assert_allclose(pgram_true, pgram_norm) # validate that the power and norm outputs are proper wrt each other weights = np.ones_like(y)/float(y.shape[0]) YY_hat = (weights * y * y).sum() YY = YY_hat # correct formula for floating_mean=False assert_allclose(pgram_power * 2.0 / (float(t.shape[0]) * YY), pgram_norm) # validate that the amp output is correct for the given input f_i = np.where(f==w)[0][0] assert_allclose(np.abs(pgram_amp[f_i]), ampl) # check invalid inputs # 1) a string that is not allowed assert_raises(ValueError, lombscargle, t, y, f, normalize='lomb') # 2) something besides a bool or str assert_raises(ValueError, lombscargle, t, y, f, normalize=2) def test_offset_removal(self): # Verify that the amplitude is the same, even with an offset # must use floating_mean=True, otherwise it will not remove an offset # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select offset = 2.15 # Large offset # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f, floating_mean=True) pgram_offset = lombscargle(t, y + offset, f, floating_mean=True) # check if offset removal works as expected assert_allclose(pgram, pgram_offset) def test_floating_mean_false(self): # Verify that when disabling the floating_mean, the calculations are correct # Input parameters ampl = 2. w = 1. phi = 0 nin = 1000 nout = 1000 p = 0.7 # Fraction of points to select offset = 2 # Large offset # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a cos wave for the selected times y = ampl * np.cos(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f, normalize=True, floating_mean=False) pgram_offset = lombscargle(t, y + offset, f, normalize=True, floating_mean=False) # check if disabling floating_mean works as expected # nearly-zero for no offset, exact value will change based on seed assert(pgram[0] < 0.01) # significant value with offset, exact value will change based on seed assert(pgram_offset[0] > 0.5) def test_amplitude_is_correct(self): # Verify that the amplitude is correct (when normalize='amplitude') # Input parameters ampl = 2. w = 1. 
phi = 0.12 nin = 100 nout = 1000 p = 0.7 # Fraction of points to select offset = 2.15 # Large offset # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.cos(w*t + phi) + offset # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Get the index of where the exact result should be f_indx = np.where(f==w)[0][0] # Calculate Lomb-Scargle periodogram (amplitude + phase) pgram = lombscargle(t, y, f, normalize='amplitude', floating_mean=True) # Check if amplitude is correct assert_allclose(np.abs(pgram[f_indx]), ampl) # Check if phase is correct # (phase angle is the negative of the phase offset) assert_allclose(-np.angle(pgram[f_indx]), phi) def test_negative_weight(self): # Test that a negative weight produces an error t = np.zeros(1) y = np.zeros(1) f = np.ones(1) weights = -np.ones(1) assert_raises(ValueError, lombscargle, t, y, f, weights=weights) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_list_input(self): # Test that input can be passsed in as lists and with a numerical issue # https://github.com/scipy/scipy/issues/8787 t = [1.98201652e+09, 1.98201752e+09, 1.98201852e+09, 1.98201952e+09, 1.98202052e+09, 1.98202152e+09, 1.98202252e+09, 1.98202352e+09, 1.98202452e+09, 1.98202552e+09, 1.98202652e+09, 1.98202752e+09, 1.98202852e+09, 1.98202952e+09, 1.98203052e+09, 1.98203152e+09, 1.98203252e+09, 1.98203352e+09, 1.98203452e+09, 1.98203552e+09, 1.98205452e+09, 1.98205552e+09, 1.98205652e+09, 1.98205752e+09, 1.98205852e+09, 1.98205952e+09, 1.98206052e+09, 1.98206152e+09, 1.98206252e+09, 1.98206352e+09, 1.98206452e+09, 1.98206552e+09, 1.98206652e+09, 1.98206752e+09, 1.98206852e+09, 1.98206952e+09, 1.98207052e+09, 1.98207152e+09, 1.98207252e+09, 1.98207352e+09, 1.98209652e+09, 1.98209752e+09, 1.98209852e+09, 1.98209952e+09, 1.98210052e+09, 1.98210152e+09, 1.98210252e+09, 1.98210352e+09, 1.98210452e+09, 1.98210552e+09, 1.98210652e+09, 1.98210752e+09, 1.98210852e+09, 1.98210952e+09, 1.98211052e+09, 1.98211152e+09, 1.98211252e+09, 1.98211352e+09, 1.98211452e+09, 1.98211552e+09, 1.98217252e+09, 1.98217352e+09, 1.98217452e+09, 1.98217552e+09, 1.98217652e+09, 1.98217752e+09, 1.98217852e+09, 1.98217952e+09, 1.98218052e+09, 1.98218152e+09, 1.98218252e+09, 1.98218352e+09, 1.98218452e+09, 1.98218552e+09, 1.98218652e+09, 1.98218752e+09, 1.98218852e+09, 1.98218952e+09, 1.98219052e+09, 1.98219152e+09, 1.98219352e+09, 1.98219452e+09, 1.98219552e+09, 1.98219652e+09, 1.98219752e+09, 1.98219852e+09, 1.98219952e+09, 1.98220052e+09, 1.98220152e+09, 1.98220252e+09, 1.98220352e+09, 1.98220452e+09, 1.98220552e+09, 1.98220652e+09, 1.98220752e+09, 1.98220852e+09, 1.98220952e+09, 1.98221052e+09, 1.98221152e+09, 1.98221252e+09, 1.98222752e+09, 1.98222852e+09, 1.98222952e+09, 1.98223052e+09, 1.98223152e+09, 1.98223252e+09, 1.98223352e+09, 1.98223452e+09, 1.98223552e+09, 1.98223652e+09, 1.98223752e+09, 1.98223852e+09, 1.98223952e+09, 1.98224052e+09, 1.98224152e+09, 1.98224252e+09, 1.98224352e+09, 1.98224452e+09, 1.98224552e+09, 1.98224652e+09, 1.98224752e+09] y = [2.97600000e+03, 3.18200000e+03, 3.74900000e+03, 4.53500000e+03, 5.43300000e+03, 6.38000000e+03, 7.34000000e+03, 8.29200000e+03, 9.21900000e+03, 1.01120000e+04, 1.09620000e+04, 1.17600000e+04, 1.25010000e+04, 1.31790000e+04, 1.37900000e+04, 1.43290000e+04, 1.47940000e+04, 1.51800000e+04, 1.54870000e+04, 1.57110000e+04, 
5.74200000e+03, 4.82300000e+03, 3.99100000e+03, 3.33600000e+03, 2.99600000e+03, 3.08400000e+03, 3.56700000e+03, 4.30700000e+03, 5.18200000e+03, 6.11900000e+03, 7.07900000e+03, 8.03400000e+03, 8.97000000e+03, 9.87300000e+03, 1.07350000e+04, 1.15480000e+04, 1.23050000e+04, 1.30010000e+04, 1.36300000e+04, 1.41890000e+04, 6.00000000e+03, 5.06800000e+03, 4.20500000e+03, 3.49000000e+03, 3.04900000e+03, 3.01600000e+03, 3.40400000e+03, 4.08800000e+03, 4.93500000e+03, 5.86000000e+03, 6.81700000e+03, 7.77500000e+03, 8.71800000e+03, 9.63100000e+03, 1.05050000e+04, 1.13320000e+04, 1.21050000e+04, 1.28170000e+04, 1.34660000e+04, 1.40440000e+04, 1.32730000e+04, 1.26040000e+04, 1.18720000e+04, 1.10820000e+04, 1.02400000e+04, 9.35300000e+03, 8.43000000e+03, 7.48100000e+03, 6.52100000e+03, 5.57000000e+03, 4.66200000e+03, 3.85400000e+03, 3.24600000e+03, 2.97900000e+03, 3.14700000e+03, 3.68800000e+03, 4.45900000e+03, 5.35000000e+03, 6.29400000e+03, 7.25400000e+03, 9.13800000e+03, 1.00340000e+04, 1.08880000e+04, 1.16910000e+04, 1.24370000e+04, 1.31210000e+04, 1.37380000e+04, 1.42840000e+04, 1.47550000e+04, 1.51490000e+04, 1.54630000e+04, 1.56950000e+04, 1.58430000e+04, 1.59070000e+04, 1.58860000e+04, 1.57800000e+04, 1.55910000e+04, 1.53190000e+04, 1.49650000e+04, 1.45330000e+04, 3.01000000e+03, 3.05900000e+03, 3.51200000e+03, 4.23400000e+03, 5.10000000e+03, 6.03400000e+03, 6.99300000e+03, 7.95000000e+03, 8.88800000e+03, 9.79400000e+03, 1.06600000e+04, 1.14770000e+04, 1.22400000e+04, 1.29410000e+04, 1.35770000e+04, 1.41430000e+04, 1.46350000e+04, 1.50500000e+04, 1.53850000e+04, 1.56400000e+04, 1.58110000e+04] periods = np.linspace(400, 120, 1000) angular_freq = 2 * np.pi / periods lombscargle(t, y, angular_freq, precenter=True, normalize=True) def test_zero_freq(self): # Verify that function works when freqs includes 0 # The value at f=0 will depend on the seed # Input parameters ampl = 2. w = 1. phi = 0.12 nin = 100 nout = 1001 p = 0.7 # Fraction of points to select offset = 0 # Randomly select a fraction of an array with timesteps rng = np.random.RandomState(2353425) r = rng.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.cos(w*t + phi) + offset # Define the array of frequencies for which to compute the periodogram f = np.linspace(0, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, y, f, normalize=True, floating_mean=True) # exact value will change based on seed # testing to make sure it is very small assert(pgram[0] < 1e-4) def test_simple_div_zero(self): # these are bare-minimum examples that would, without the eps adjustments, # cause division-by-zero errors # first, test with example that will cause first SS sum to be 0.0 t = [t + 1 for t in range(0, 32)] y = np.ones(len(t)) freqs = [2.0*np.pi] * 2 # must have 2+ elements lombscargle(t, y, freqs) # second, test with example that will cause first CC sum to be 0.0 t = [t*4 + 1 for t in range(0, 32)] y = np.ones(len(t)) freqs = [np.pi/2.0] * 2 # must have 2+ elements lombscargle(t, y, freqs) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_input_mutation(self): # this tests for mutation of the input arrays # https://github.com/scipy/scipy/issues/23474 # Input parameters ampl = 2. w = 1. 
phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps rng = np.random.default_rng() r = rng.random(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) weights = np.ones_like(y) # create original copies before passing t_org = t.copy() y_org = y.copy() f_org = f.copy() weights_org = weights.copy() lombscargle(t, y, f, precenter=True, weights=weights) # check all 4 array inputs assert_array_equal(t, t_org) assert_array_equal(y, y_org) assert_array_equal(f, f_org) assert_array_equal(weights, weights_org) def test_precenter_deprecation(self): # test that precenter deprecation warning is raised # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select offset = 0.15 # Offset to be subtracted in pre-centering # Randomly select a fraction of an array with timesteps rng = np.random.default_rng() r = rng.random(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times y = ampl * np.sin(w*t + phi) + offset # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram with pytest.deprecated_call(): lombscargle(t, y, f, precenter=True)
TestLombscargle
python
getsentry__sentry
tests/sentry/feedback/endpoints/test_organization_feedback_categories.py
{ "start": 545, "end": 12219 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-user-feedback-categories" def setUp(self) -> None: super().setUp() self.login_as(user=self.user) self.org = self.organization self.project1 = self.project self.project2 = self.create_project(teams=[self.team]) self.features = { "organizations:user-feedback-ai-categorization-features": True, } self.url = reverse( self.endpoint, kwargs={"organization_id_or_slug": self.org.slug}, ) self.mock_has_seer_access_patcher = patch( "sentry.feedback.endpoints.organization_feedback_categories.has_seer_access", return_value=True, ) self.mock_make_signed_seer_api_request_patcher = patch( "sentry.feedback.endpoints.organization_feedback_categories.make_signed_seer_api_request" ) self.mock_threshold_to_get_associated_labels_patcher = patch( "sentry.feedback.endpoints.organization_feedback_categories.THRESHOLD_TO_GET_ASSOCIATED_LABELS", 1, ) self.mock_min_feedbacks_context_patcher = patch( "sentry.feedback.endpoints.organization_feedback_categories.MIN_FEEDBACKS_CONTEXT", 1 ) self.mock_make_signed_seer_api_request = ( self.mock_make_signed_seer_api_request_patcher.start() ) self.mock_has_seer_access = self.mock_has_seer_access_patcher.start() self.mock_threshold_to_get_associated_labels_patcher.start() self.mock_min_feedbacks_context_patcher.start() def tearDown(self) -> None: self.mock_has_seer_access_patcher.stop() self.mock_make_signed_seer_api_request_patcher.stop() self.mock_threshold_to_get_associated_labels_patcher.stop() self.mock_min_feedbacks_context_patcher.stop() super().tearDown() def _create_feedback( self, message: str, labels: list[str], project: Project, dt: datetime | None = None, ) -> None: tags = {f"{AI_LABEL_TAG_PREFIX}.label.{i}": labels[i] for i in range(len(labels))} event = mock_feedback_event( project.id, message=message, tags=tags, dt=dt, ) create_feedback_issue(event, project, FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE) def test_get_feedback_categories_without_feature_flag(self) -> None: response = self.get_error_response(self.org.slug) assert response.status_code == 403 def test_get_feedback_categories_without_seer_access(self) -> None: self.mock_has_seer_access.return_value = False with self.feature(self.features): response = self.get_error_response(self.org.slug) assert response.status_code == 403 def test_get_feedback_categories_basic(self) -> None: self._create_feedback("a", ["User Interface", "Speed"], self.project1) self._create_feedback("b", ["Performance", "Usability", "Loading"], self.project1) self._create_feedback("c", ["Security", "Performance"], self.project2) self._create_feedback("d", ["Performance", "User Interface", "Speed"], self.project2) self.mock_make_signed_seer_api_request.return_value = MockSeerResponse( 200, json_data={ "data": [ { "primaryLabel": "User Interface", "associatedLabels": ["Usability"], }, {"primaryLabel": "Performance", "associatedLabels": ["Speed", "Loading"]}, {"primaryLabel": "Security", "associatedLabels": []}, {"primaryLabel": "hallucinated", "associatedLabels": []}, ] }, ) with self.feature(self.features): response = self.get_success_response(self.org.slug) assert response.data["success"] is True assert response.data["numFeedbacksContext"] == 4 categories = response.data["categories"] assert len(categories) == 4 assert any(category["primaryLabel"] == "User Interface" for category in categories) assert any(category["primaryLabel"] == "Performance" for category in categories) assert any(category["primaryLabel"] == "Security" for category in categories) assert 
any(category["primaryLabel"] == "hallucinated" for category in categories) for category in categories: if category["primaryLabel"] == "User Interface": assert category["feedbackCount"] == 3 elif category["primaryLabel"] == "Performance": assert category["feedbackCount"] == 4 elif category["primaryLabel"] == "Security": assert category["feedbackCount"] == 1 elif category["primaryLabel"] == "hallucinated": assert category["feedbackCount"] == 0 def test_get_feedback_categories_with_project_filter(self) -> None: self._create_feedback("a", ["User Interface", "Performance"], self.project1) self._create_feedback("b", ["Performance", "Loading"], self.project1) self._create_feedback("c", ["Security", "Performance"], self.project2) self._create_feedback("d", ["Performance", "User Interface", "Speed"], self.project2) self.mock_make_signed_seer_api_request.return_value = MockSeerResponse( 200, json_data={ "data": [ { "primaryLabel": "User Interface", "associatedLabels": [], }, {"primaryLabel": "Performance", "associatedLabels": ["Loading"]}, ] }, ) with self.feature(self.features): response = self.get_success_response(self.org.slug, project=[self.project1.id]) assert response.data["success"] is True assert response.data["numFeedbacksContext"] == 2 categories = response.data["categories"] assert len(categories) == 2 assert any(category["primaryLabel"] == "User Interface" for category in categories) assert any(category["primaryLabel"] == "Performance" for category in categories) for category in categories: if category["primaryLabel"] == "User Interface": assert category["feedbackCount"] == 1 elif category["primaryLabel"] == "Performance": assert category["feedbackCount"] == 2 @patch( "sentry.feedback.endpoints.organization_feedback_categories.MAX_GROUP_LABELS", 2, ) def test_max_group_labels_limit(self) -> None: """Test that MAX_GROUP_LABELS constant is respected when processing label groups.""" self._create_feedback("a", ["User Interface"], self.project1) self._create_feedback("b", ["User Interface", "Usability"], self.project1) self._create_feedback("c", ["Accessibility"], self.project1) # Mock Seer to return a label group with more than MAX_GROUP_LABELS labels self.mock_make_signed_seer_api_request.return_value = MockSeerResponse( 200, json_data={ "data": [ { "primaryLabel": "User Interface", "associatedLabels": ["Usability", "Accessibility"], } ] }, ) with self.feature(self.features): response = self.get_success_response(self.org.slug) assert response.data["success"] is True categories = response.data["categories"] assert len(categories) == 1 assert categories[0]["primaryLabel"] == "User Interface" # Assert associated labels were truncated to length (MAX_GROUP_LABELS - 1) assert categories[0]["associatedLabels"] == ["Usability"] def test_filter_invalid_associated_labels_by_count_ratio(self) -> None: """Test that associated labels with too many feedbacks (relative to primary label) are filtered out.""" # Create feedbacks where associated label feedbacks are >= primary label feedbacks. # This should cause them to be filtered out from the label group. self._create_feedback("a", ["User Interface", "Issues UI"], self.project1) self._create_feedback("b", ["Usability", "Issues UI"], self.project1) # XXX: the endpoint checks for assoc >= 3/4 * primary, but this test is more lenient in case the ratio changes. 
self.mock_make_signed_seer_api_request.return_value = MockSeerResponse( 200, json_data={ "data": [ { "primaryLabel": "User Interface", "associatedLabels": ["Usability", "Issues UI"], } ] }, ) with self.feature(self.features): response = self.get_success_response(self.org.slug) assert response.data["success"] is True categories = response.data["categories"] assert len(categories) == 1 assert categories[0]["primaryLabel"] == "User Interface" assert categories[0]["associatedLabels"] == [] assert categories[0]["feedbackCount"] == 1 def test_seer_request_error(self) -> None: self._create_feedback("a", ["User Interface", "Issues UI"], self.project1) self.mock_make_signed_seer_api_request.side_effect = Exception("seer failed") with self.feature(self.features): response = self.get_error_response(self.org.slug) assert response.status_code == 500 assert response.data["detail"] == "Failed to generate user feedback label groups" def test_seer_http_errors(self) -> None: self._create_feedback("a", ["User Interface", "Issues UI"], self.project1) for status in [400, 401, 403, 404, 429, 500, 502, 503, 504]: self.mock_make_signed_seer_api_request.return_value = MockSeerResponse( status=status, json_data={"detail": "seer failed"} ) with self.feature(self.features): response = self.get_error_response(self.org.slug) assert response.status_code == 500 assert response.data["detail"] == "Failed to generate user feedback label groups" def test_fallback_to_primary_labels_when_below_threshold(self) -> None: """Test that when feedback count is below THRESHOLD_TO_GET_ASSOCIATED_LABELS, we fall back to primary labels only (no Seer request).""" with patch( "sentry.feedback.endpoints.organization_feedback_categories.THRESHOLD_TO_GET_ASSOCIATED_LABELS", 2, ): self._create_feedback("a", ["User Interface", "Usability"], self.project1) with self.feature(self.features): response = self.get_success_response(self.org.slug) assert self.mock_make_signed_seer_api_request.call_count == 0 assert response.data["success"] is True categories = response.data["categories"] assert len(categories) == 2 assert any(category["primaryLabel"] == "User Interface" for category in categories) assert any(category["primaryLabel"] == "Usability" for category in categories) for category in categories: assert category["associatedLabels"] == [] assert category["feedbackCount"] == 1
OrganizationFeedbackCategoriesTest
python
doocs__leetcode
solution/0400-0499/0409.Longest Palindrome/Solution2.py
{ "start": 0, "end": 245 }
class ____:
    def longestPalindrome(self, s: str) -> int:
        odd = defaultdict(int)
        cnt = 0
        for c in s:
            odd[c] ^= 1
            cnt += 1 if odd[c] else -1
        return len(s) - cnt + 1 if cnt else len(s)
Solution
python
cherrypy__cherrypy
cherrypy/tutorial/tut04_complex_site.py
{ "start": 620, "end": 954 }
class ____:
    """Joke app."""

    @cherrypy.expose
    def index(self):
        """Produce HTTP response body of joke page app index URI."""
        return """ <p>"In Python, how do you create a string of random characters?" -- "Read a Perl file!"</p> <p>[<a href="../">Return</a>]</p>"""
JokePage
python
pdm-project__pdm
src/pdm/cli/commands/lock.py
{ "start": 585, "end": 4867 }
class ____(BaseCommand):
    """Resolve and lock dependencies"""

    arguments = (
        *BaseCommand.arguments,
        lockfile_option,
        no_isolation_option,
        config_setting_option,
        override_option,
        skip_option,
        groups_group,
        lock_strategy_group,
    )

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        parser.add_argument(
            "--refresh",
            action="store_true",
            help="Refresh the content hash and file hashes in the lock file",
        )
        parser.add_argument(
            "--check",
            action="store_true",
            help="Check if the lock file is up to date and quit",
        )
        parser.add_argument(
            "--update-reuse",
            action="store_const",
            dest="update_strategy",
            default="all",
            const="reuse",
            help="Reuse pinned versions already present in lock file if possible",
        )
        parser.add_argument(
            "--update-reuse-installed",
            action="store_const",
            dest="update_strategy",
            const="reuse-installed",
            help="Reuse installed packages if possible",
        )
        parser.add_argument(
            "--exclude-newer",
            help="Exclude packages newer than the given UTC date in format `YYYY-MM-DD[THH:MM:SSZ]`",
            type=convert_to_datetime,
        )
        target_group = parser.add_argument_group("Lock Target")
        target_group.add_argument("--python", help="The Python range to lock for. E.g. `>=3.9`, `==3.12.*`")
        target_group.add_argument(
            "--platform",
            help="The platform to lock for. E.g. `windows`, `linux`, `macos`, `manylinux_2_17_x86_64`. "
            "See docs for available choices: http://pdm-project.org/en/latest/usage/lock-targets/",
        )
        target_group.add_argument(
            "--implementation",
            help="The Python implementation to lock for. E.g. `cpython`, `pypy`, `pyston`",
        )
        target_group.add_argument("--append", action="store_true", help="Append the result to the current lock file")

    def handle(self, project: Project, options: argparse.Namespace) -> None:
        if options.check:
            strategy = actions.check_lockfile(project, False)
            if strategy:
                project.core.ui.echo(
                    f"[error]{termui.Emoji.FAIL}[/] Lockfile is [error]out of date[/].",
                    err=True,
                    verbosity=termui.Verbosity.DETAIL,
                )
                sys.exit(1)
            else:
                project.core.ui.echo(
                    f"[success]{termui.Emoji.SUCC}[/] Lockfile is [success]up to date[/].",
                    err=True,
                    verbosity=termui.Verbosity.DETAIL,
                )
                sys.exit(0)
        selection = GroupSelection.from_options(project, options)
        strategy = options.update_strategy
        if options.exclude_newer:
            strategy = "all"
            if strategy != options.update_strategy:
                project.core.ui.info("--exclude-newer is set, forcing --update-all")
            project.core.state.exclude_newer = options.exclude_newer
        env_spec: EnvSpec | None = None
        if any([options.python, options.platform, options.implementation]):
            replace_dict = {}
            if options.python:
                if re.match(r"[\d.]+", options.python):
                    options.python = f">={options.python}"
                replace_dict["requires_python"] = PySpecSet(options.python)
            if options.platform:
                replace_dict["platform"] = options.platform
            if options.implementation:
                replace_dict["implementation"] = options.implementation
            env_spec = project.environment.allow_all_spec.replace(**replace_dict)
        actions.do_lock(
            project,
            refresh=options.refresh,
            strategy=cast(str, strategy),
            groups=selection.all(),
            strategy_change=options.strategy_change,
            hooks=HookManager(project, options.skip),
            env_spec=env_spec,
            append=options.append,
        )
Command
python
python-poetry__poetry
src/poetry/console/command_loader.py
{ "start": 292, "end": 615 }
class ____(FactoryCommandLoader):
    def register_factory(
        self, command_name: str, factory: Callable[[], Command]
    ) -> None:
        if command_name in self._factories:
            raise CleoLogicError(f'The command "{command_name}" already exists.')
        self._factories[command_name] = factory
CommandLoader
python
tensorflow__tensorflow
tensorflow/python/ops/control_flow_ops_test.py
{ "start": 24100, "end": 39288 }
class ____(test_util.TensorFlowTestCase): def assertAllEqualNested(self, a, b): if isinstance(a, (list, tuple)): for entry_a, entry_b in zip(a, b): self.assertAllEqualNested(entry_a, entry_b) else: self.assertAllEqual(a, b) def _testShape(self, fn_true, fn_false, expected_shape, strict=False): condition = array_ops.placeholder(dtypes.bool) output_cond = tf_cond.cond( condition, fn_true, fn_false, strict=strict) self.assertEqual( _raw_nested_shape(_get_nested_shape(output_cond)), _raw_nested_shape(expected_shape)) output_case = control_flow_case.case([(condition, fn_true)], fn_false, strict=strict) self.assertEqual( _raw_nested_shape(_get_nested_shape(output_case)), _raw_nested_shape(expected_shape)) def _testReturnValues(self, fn_true, fn_false, expected_value_true, expected_value_false, strict=False, check_cond=True, feed_dict=None): if feed_dict is None: feed_dict = {} condition = array_ops.placeholder(dtypes.bool) output_cond = tf_cond.cond( condition, fn_true, fn_false, strict=strict) output_case = control_flow_case.case([(condition, fn_true)], fn_false, strict=strict) with self.cached_session() as sess: self.evaluate(variables.global_variables_initializer()) true_feed_dict = {condition: True} true_feed_dict.update(feed_dict) result_cond, result_case = sess.run([output_cond, output_case], feed_dict=true_feed_dict) self.assertAllEqualNested(result_cond, expected_value_true) if check_cond: self.assertAllEqualNested(result_case, expected_value_true) false_feed_dict = {condition: False} false_feed_dict.update(feed_dict) result_cond, result_case = sess.run([output_cond, output_case], feed_dict=false_feed_dict) self.assertAllEqualNested(result_cond, expected_value_false) if check_cond: self.assertAllEqualNested(result_case, expected_value_false) @test_util.run_deprecated_v1 def test_int(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: 1 fn_false = lambda: 2 self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 2) self._testShape(fn_true, fn_false, shape, strict=True) self._testReturnValues(fn_true, fn_false, 1, 2, strict=True) @test_util.run_deprecated_v1 def test_float(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: 1.0 fn_false = lambda: 2.0 self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1.0, 2.0) @test_util.run_deprecated_v1 def test_noop(self): shape = tensor_shape.TensorShape(None) self._testShape(control_flow_ops.no_op, control_flow_ops.no_op, shape) self._testReturnValues( control_flow_ops.no_op, control_flow_ops.no_op, True, False, check_cond=False) @test_util.run_deprecated_v1 def test_string(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: "abc" fn_false = lambda: "xyz" self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, b"abc", b"xyz") @test_util.run_v1_only("b/138741991") def test_variable(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: variables.Variable(3.0) fn_false = lambda: variables.Variable(4.0) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 3.0, 4.0) @test_util.run_v1_only("b/120553181") def test_none(self): fn_none = lambda: None fn_tensor = lambda: constant_op.constant(1) with self.assertRaises(ValueError): tf_cond.cond(constant_op.constant(True), fn_none, fn_tensor) with self.assertRaises(ValueError): tf_cond.cond(constant_op.constant(True), fn_tensor, fn_none) @test_util.run_deprecated_v1 def test_tensors(self): def _build_true_branch(dtype): def _build(): return 
(array_ops.zeros([2, 2], dtype=dtype), array_ops.ones([3, 3], dtype=dtype)) return _build def _build_false_branch(dtype): def _build(): return (array_ops.ones([2, 2], dtype=dtype), array_ops.zeros([3, 3], dtype=dtype)) return _build for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = (tensor_shape.TensorShape([2, 2]), tensor_shape.TensorShape([3, 3])) fn_true = _build_true_branch(dtype) fn_false = _build_false_branch(dtype) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, (np.zeros([2, 2]), np.ones([3, 3])), (np.ones([2, 2]), np.zeros([3, 3]))) @test_util.run_deprecated_v1 def test_tensors_unknown_shape(self): def _build_true_branch(dtype): tensor = array_ops.placeholder(dtype=dtype, shape=None) def _build(): return tensor return _build, tensor def _build_false_branch(dtype): tensor = array_ops.placeholder(dtype=dtype, shape=None) def _build(): return tensor return _build, tensor for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = tensor_shape.TensorShape(None) fn_true, true_tensor = _build_true_branch(dtype) fn_false, false_tensor = _build_false_branch(dtype) self._testShape(fn_true, fn_false, shape) self._testReturnValues( fn_true, fn_false, np.zeros([2, 2]), np.ones([2, 2]), feed_dict={ true_tensor: np.zeros([2, 2]), false_tensor: np.ones([2, 2]) }) @test_util.run_deprecated_v1 def test_sparse_tensors(self): shape = tensor_shape.TensorShape([3, 4]) def true_fn(): return [ sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) ] def false_fn(): return [ sparse_tensor.SparseTensor( indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4]) ] value1 = sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) value2 = sparse_tensor.SparseTensorValue( indices=[[0, 0], [2, 1]], values=[3, 4], dense_shape=[3, 4]) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(true_fn, false_fn, shape) self._testReturnValues(true_fn, false_fn, value1, value2) self._testShape(true_fn, false_fn, [shape], strict=True) self._testReturnValues(true_fn, false_fn, [value1], [value2], strict=True) @test_util.run_deprecated_v1 def test_tensors_with_partially_specified_shapes(self): def _build_branch(dtype, shape): a = array_ops.placeholder(dtype=dtype, shape=shape[0]) b = array_ops.placeholder(dtype=dtype, shape=shape[1]) c = array_ops.placeholder(dtype=dtype, shape=shape[2]) def _build(): return a, b, c return _build, (a, b, c) for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8): shape = (tensor_shape.TensorShape([None, 2]), tensor_shape.TensorShape([None]), tensor_shape.TensorShape([3, None])) fn_true, true_tensors = _build_branch(dtype, shape) fn_false, false_tensors = _build_branch(dtype, shape) self._testShape(fn_true, fn_false, shape) self._testReturnValues( fn_true, fn_false, (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])), (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])), feed_dict={ true_tensors[0]: np.zeros([2, 2]), false_tensors[0]: np.zeros([2, 2]), true_tensors[1]: np.zeros([5]), false_tensors[1]: np.zeros([5]), true_tensors[2]: np.ones([3, 3]), false_tensors[2]: np.ones([3, 3]) }) @test_util.run_deprecated_v1 def test_tensor_arrays(self): element_shape = tensor_shape.TensorShape([2]) ta1 = _create_tensor_array(4, element_shape) ta2 = _create_tensor_array(4, element_shape) shape = tensor_array_ops.TensorArray fn_true = lambda: ta1 fn_false = lambda: ta2 self._testShape(fn_true, fn_false, shape) 
@test_util.run_deprecated_v1 def test_tensor_array_reads(self): shape = tensor_shape.TensorShape([2]) ta = _create_tensor_array(4, shape) fn_true = lambda: ta.read(0) fn_false = lambda: ta.read(1) self._testShape(fn_true, fn_false, shape) @test_util.run_v1_only("b/138741991") def test_list(self): shape = [ tensor_shape.TensorShape([]), tensor_shape.TensorShape([]), tensor_shape.TensorShape([]) ] fn_true = lambda: [constant_op.constant(1), 2, variables.Variable(3.0)] fn_false = lambda: [constant_op.constant(3), 4, variables.Variable(5.0)] self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, [1, 2, 3.0], [3, 4, 5.0]) @test_util.run_v1_only("Non-strict cond is only available in v1") def test_non_strict(self): shape = tensor_shape.TensorShape([]) fn_tensor = lambda: constant_op.constant(1) fn_list = lambda: [constant_op.constant(2)] fn_tuple = lambda: (constant_op.constant(3),) self._testShape(fn_tensor, fn_list, shape) self._testShape(fn_tensor, fn_tuple, shape) self._testShape(fn_list, fn_tuple, shape) self._testReturnValues(fn_tensor, fn_list, 1, 2) self._testReturnValues(fn_tensor, fn_tuple, 1, 3) self._testReturnValues(fn_list, fn_tuple, 2, 3) @test_util.run_v1_only("b/120553181") def test_singleton_strict(self): fn_tensor = lambda: constant_op.constant(1) fn_list = lambda: [constant_op.constant(2)] fn_tuple = lambda: (constant_op.constant(3),) with self.assertRaises(ValueError): tf_cond.cond( constant_op.constant(True), fn_tensor, fn_list, strict=True) with self.assertRaises(TypeError): tf_cond.cond( constant_op.constant(True), fn_list, fn_tuple, strict=True) with self.assertRaises(ValueError): control_flow_case.case([(constant_op.constant(True), fn_tensor)], fn_list, strict=True) with self.assertRaises(TypeError): control_flow_case.case([(constant_op.constant(True), fn_list)], fn_tuple, strict=True) @test_util.run_deprecated_v1 def test_singleton_list(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: [constant_op.constant(1)] fn_false = lambda: [constant_op.constant(3)] # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, [shape], strict=True) self._testReturnValues(fn_true, fn_false, [1], [3], strict=True) @test_util.run_deprecated_v1 def test_singleton_tuple(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: (constant_op.constant(1),) fn_false = lambda: (constant_op.constant(3),) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, (shape,), strict=True) self._testReturnValues(fn_true, fn_false, (1,), (3,), strict=True) @test_util.run_deprecated_v1 def test_singleton_namedtuple(self): shape = tensor_shape.TensorShape([]) fn_true = lambda: SingletonTestTuple(constant_op.constant(1)) fn_false = lambda: SingletonTestTuple(constant_op.constant(3)) # Non-strict cond is only available in v1 if not tf2.enabled(): self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, 1, 3) self._testShape(fn_true, fn_false, SingletonTestTuple(shape), strict=True) self._testReturnValues( fn_true, fn_false, SingletonTestTuple(1), SingletonTestTuple(3), strict=True) @test_util.run_deprecated_v1 def test_tuple(self): shape = (tensor_shape.TensorShape([]), tensor_shape.TensorShape([])) fn_true = lambda: (constant_op.constant(1), 2) fn_false = lambda: 
(constant_op.constant(3), 4) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, (1, 2), (3, 4)) @test_util.run_deprecated_v1 def test_namedtuple(self): shape = TestTuple( tensor_shape.TensorShape([]), tensor_shape.TensorShape([])) fn_true = lambda: TestTuple(constant_op.constant(1), 2) fn_false = lambda: TestTuple(constant_op.constant(3), 4) self._testShape(fn_true, fn_false, shape) self._testReturnValues(fn_true, fn_false, TestTuple(1, 2), TestTuple(3, 4)) @test_util.run_deprecated_v1 def test_nested(self): shape = [ tensor_shape.TensorShape([]), TestTuple( tensor_shape.TensorShape([]), [tensor_shape.TensorShape([]), tensor_shape.TensorShape([])]), tensor_shape.TensorShape([5, 5]), tensor_shape.TensorShape([]) ] def true_fn(): return [ constant_op.constant(1), TestTuple(constant_op.constant(2), [3, 4]), array_ops.zeros([5, 5]), 6 ] def false_fn(): return [ constant_op.constant(11), TestTuple(constant_op.constant(12), [13, 14]), array_ops.ones([5, 5]), 16 ] self._testShape(true_fn, false_fn, shape) self._testReturnValues( true_fn, false_fn, [1, TestTuple(2, [3, 4]), np.zeros([5, 5]), 6], [11, TestTuple(12, [13, 14]), np.ones([5, 5]), 16]) @test_util.run_deprecated_v1 def test_cond_inside_while_loop(self): def body(i, matrix): result_tuple, unused_matrix = tf_cond.cond( constant_op.constant(True), lambda: (TestTuple(matrix * 2, matrix * 4), matrix), lambda: (TestTuple(matrix * 4, matrix * 2), matrix)) return [i + 1, result_tuple.a] iteration, matrix = while_loop.while_loop( lambda i, matrix: i < 10, body, loop_vars=[constant_op.constant(0), array_ops.ones([2, 2])]) self.assertEqual(iteration.get_shape(), tensor_shape.TensorShape([])) self.assertEqual(matrix.get_shape(), tensor_shape.TensorShape([2, 2])) @test_util.run_all_in_graph_and_eager_modes
DataTypesTest
python
getsentry__sentry
src/sentry/integrations/messaging/metrics.py
{ "start": 3327, "end": 3477 }
class ____(StrEnum):
    """Common reasons why a messaging interaction may fail."""

    MISSING_ACTION = "missing_action"
MessageInteractionFailureReason
python
spack__spack
lib/spack/spack/test/llnl/util/lock.py
{ "start": 20302, "end": 26203 }
class ____: def __init__(self, lock_path): self.lock_path = lock_path def p1(self, barrier): lock = lk.Lock(self.lock_path) lock.acquire_write() barrier.wait() # ---------------------------------------- 1 # others test timeout barrier.wait() # ---------------------------------------- 2 lock.release_write() # release and others acquire read barrier.wait() # ---------------------------------------- 3 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) lock.acquire_read() barrier.wait() # ---------------------------------------- 4 lock.release_read() barrier.wait() # ---------------------------------------- 5 # p2 upgrades read to write barrier.wait() # ---------------------------------------- 6 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 7 # p2 releases write and read barrier.wait() # ---------------------------------------- 8 # p3 acquires read barrier.wait() # ---------------------------------------- 9 # p3 upgrades read to write barrier.wait() # ---------------------------------------- 10 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 11 # p3 releases locks barrier.wait() # ---------------------------------------- 12 lock.acquire_read() barrier.wait() # ---------------------------------------- 13 lock.release_read() def p2(self, barrier): lock = lk.Lock(self.lock_path) # p1 acquires write barrier.wait() # ---------------------------------------- 1 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 2 lock.acquire_read() barrier.wait() # ---------------------------------------- 3 # p1 tests shared read barrier.wait() # ---------------------------------------- 4 # others release reads barrier.wait() # ---------------------------------------- 5 lock.acquire_write() # upgrade read to write barrier.wait() # ---------------------------------------- 6 # others test timeout barrier.wait() # ---------------------------------------- 7 lock.release_write() # release read AND write (need both) lock.release_read() barrier.wait() # ---------------------------------------- 8 # p3 acquires read barrier.wait() # ---------------------------------------- 9 # p3 upgrades read to write barrier.wait() # ---------------------------------------- 10 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 11 # p3 releases locks barrier.wait() # ---------------------------------------- 12 lock.acquire_read() barrier.wait() # ---------------------------------------- 13 lock.release_read() def p3(self, barrier): lock = lk.Lock(self.lock_path) # p1 acquires write barrier.wait() # ---------------------------------------- 1 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 2 lock.acquire_read() barrier.wait() # ---------------------------------------- 3 # p1 tests shared read 
barrier.wait() # ---------------------------------------- 4 lock.release_read() barrier.wait() # ---------------------------------------- 5 # p2 upgrades read to write barrier.wait() # ---------------------------------------- 6 with pytest.raises(lk.LockTimeoutError): lock.acquire_write(lock_fail_timeout) with pytest.raises(lk.LockTimeoutError): lock.acquire_read(lock_fail_timeout) barrier.wait() # ---------------------------------------- 7 # p2 releases write & read barrier.wait() # ---------------------------------------- 8 lock.acquire_read() barrier.wait() # ---------------------------------------- 9 lock.acquire_write() barrier.wait() # ---------------------------------------- 10 # others test timeout barrier.wait() # ---------------------------------------- 11 lock.release_read() # release read AND write in opposite lock.release_write() # order from before on p2 barrier.wait() # ---------------------------------------- 12 lock.acquire_read() barrier.wait() # ---------------------------------------- 13 lock.release_read() # # Longer test case that ensures locks are reusable. Ordering is # enforced by barriers throughout -- steps are shown with numbers. # def test_complex_acquire_and_release_chain(lock_path): test_chain = ComplexAcquireAndRelease(lock_path) multiproc_test(test_chain.p1, test_chain.p2, test_chain.p3)
ComplexAcquireAndRelease
python
apache__airflow
providers/openai/src/airflow/providers/openai/operators/openai.py
{ "start": 1343, "end": 3468 }
class ____(BaseOperator):
    """
    Operator that accepts input text to generate OpenAI embeddings using the specified model.

    :param conn_id: The OpenAI connection ID to use.
    :param input_text: The text to generate OpenAI embeddings for. This can be a string,
        a list of strings, a list of integers, or a list of lists of integers.
    :param model: The OpenAI model to be used for generating the embeddings.
    :param embedding_kwargs: Additional keyword arguments to pass to the OpenAI
        `create_embeddings` method.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:OpenAIEmbeddingOperator`
        For possible options for `embedding_kwargs`, see:
        https://platform.openai.com/docs/api-reference/embeddings/create
    """

    template_fields: Sequence[str] = ("input_text",)

    def __init__(
        self,
        conn_id: str,
        input_text: str | list[str] | list[int] | list[list[int]],
        model: str = "text-embedding-ada-002",
        embedding_kwargs: dict | None = None,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.conn_id = conn_id
        self.input_text = input_text
        self.model = model
        self.embedding_kwargs = embedding_kwargs or {}

    @cached_property
    def hook(self) -> OpenAIHook:
        """Return an instance of the OpenAIHook."""
        return OpenAIHook(conn_id=self.conn_id)

    def execute(self, context: Context) -> list[float]:
        if not self.input_text or not isinstance(self.input_text, (str, list)):
            raise ValueError(
                "The 'input_text' must be a non-empty string, list of strings, list of integers, or list of lists of integers."
            )
        self.log.info("Generating embeddings for the input text of length: %d", len(self.input_text))
        embeddings = self.hook.create_embeddings(self.input_text, model=self.model, **self.embedding_kwargs)
        self.log.info("Generated embeddings for %d items", len(embeddings))
        return embeddings
OpenAIEmbeddingOperator
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 54787, "end": 54952 }
class ____(_PrintableStructure):
    _fields_ = [
        ('fans', c_nvmlUnitFanInfo_t * 24),
        ('count', c_uint)
    ]

## Device structures
c_nvmlUnitFanSpeeds_t
python
pytorch__pytorch
test/inductor/test_group_batch_fusion.py
{ "start": 3695, "end": 5243 }
class ____(torch.nn.Module):
    def __init__(self, device, has_weight=True, has_bias=True):
        super().__init__()
        self.device = device
        self.scale0 = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.randn(10)) for _ in range(5)]
        ).to(self.device)
        self.bias0 = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.randn(10)) for _ in range(5)]
        ).to(self.device)
        self.scale1 = (
            torch.nn.ParameterList(
                [torch.nn.Parameter(torch.randn(5, 10)) for _ in range(5)]
            ).to(self.device)
            if has_weight
            else [None for _ in range(5)]
        )
        self.bias1 = (
            torch.nn.ParameterList(
                [torch.nn.Parameter(torch.randn(5, 10)) for _ in range(5)]
            ).to(self.device)
            if has_bias
            else [None for _ in range(5)]
        )

    def forward(self, x):
        l1_out = torch.split(x.to(self.device), 10, dim=2)
        post_l1 = [
            torch.nn.functional.layer_norm(
                l1_out[i], (10,), weight=self.scale0[i], bias=self.bias0[i]
            )
            for i in range(len(l1_out))
        ]
        l1_out = torch.cat(post_l1, dim=2)
        l2_out = torch.split(l1_out, 10, dim=2)
        post_l2 = [
            torch.nn.functional.layer_norm(
                l2_out[i], (5, 10), weight=self.scale1[i], bias=self.bias1[i]
            )
            for i in range(len(l2_out))
        ]
        return torch.cat(post_l2, dim=2)
MyModule3
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 87644, "end": 91639 }
class ____: @pytest.mark.parametrize('use_list', (False, True)) def test_validationerror_code_with_msg(self, use_list): class ExampleSerializer(serializers.Serializer): password = serializers.CharField() def validate_password(self, obj): err = DjangoValidationError( 'exc_msg %s', code='exc_code', params=('exc_param',), ) if use_list: err = DjangoValidationError([err]) raise err serializer = ExampleSerializer(data={'password': 123}) serializer.is_valid() assert serializer.errors == {'password': ['exc_msg exc_param']} assert serializer.errors['password'][0].code == 'exc_code' @pytest.mark.parametrize('use_list', (False, True)) def test_validationerror_code_with_msg_including_percent(self, use_list): class ExampleSerializer(serializers.Serializer): password = serializers.CharField() def validate_password(self, obj): err = DjangoValidationError('exc_msg with %', code='exc_code') if use_list: err = DjangoValidationError([err]) raise err serializer = ExampleSerializer(data={'password': 123}) serializer.is_valid() assert serializer.errors == {'password': ['exc_msg with %']} assert serializer.errors['password'][0].code == 'exc_code' @pytest.mark.parametrize('code', (None, 'exc_code',)) @pytest.mark.parametrize('use_list', (False, True)) def test_validationerror_code_with_dict(self, use_list, code): class ExampleSerializer(serializers.Serializer): def validate(self, obj): if code is None: err = DjangoValidationError({ 'email': 'email error', }) else: err = DjangoValidationError({ 'email': DjangoValidationError( 'email error', code=code), }) if use_list: err = DjangoValidationError([err]) raise err serializer = ExampleSerializer(data={}) serializer.is_valid() expected_code = code if code else 'invalid' if use_list: assert serializer.errors == { 'non_field_errors': [ exceptions.ErrorDetail( string='email error', code=expected_code ) ] } else: assert serializer.errors == { 'email': ['email error'], } assert serializer.errors['email'][0].code == expected_code @pytest.mark.parametrize('code', (None, 'exc_code',)) def test_validationerror_code_with_dict_list_same_code(self, code): class ExampleSerializer(serializers.Serializer): def validate(self, obj): if code is None: raise DjangoValidationError({'email': ['email error 1', 'email error 2']}) raise DjangoValidationError({'email': [ DjangoValidationError('email error 1', code=code), DjangoValidationError('email error 2', code=code), ]}) serializer = ExampleSerializer(data={}) serializer.is_valid() expected_code = code if code else 'invalid' assert serializer.errors == { 'email': [ exceptions.ErrorDetail( string='email error 1', code=expected_code ), exceptions.ErrorDetail( string='email error 2', code=expected_code ), ] }
TestValidationErrorCode
python
ray-project__ray
python/ray/autoscaler/_private/aws/cloudwatch/cloudwatch_helper.py
{ "start": 779, "end": 32701 }
class ____: def __init__( self, provider_config: Dict[str, Any], node_id: str, cluster_name: str ) -> None: self.node_id = node_id self.cluster_name = cluster_name self.provider_config = provider_config region = provider_config["region"] self.ec2_resource = resource_cache("ec2", region) self.ec2_client = self.ec2_resource.meta.client self.ssm_client = client_cache("ssm", region) cloudwatch_resource = resource_cache("cloudwatch", region) self.cloudwatch_client = cloudwatch_resource.meta.client self.CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC: Dict[ str, Callable ] = { CloudwatchConfigType.AGENT.value: self._replace_cwa_config_vars, CloudwatchConfigType.DASHBOARD.value: self._replace_dashboard_config_vars, CloudwatchConfigType.ALARM.value: self._load_config_file, } self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_FUNC_HEAD_NODE: Dict[str, Callable] = { CloudwatchConfigType.AGENT.value: self._restart_cloudwatch_agent, CloudwatchConfigType.DASHBOARD.value: self._put_cloudwatch_dashboard, CloudwatchConfigType.ALARM.value: self._put_cloudwatch_alarm, } self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_FUNC_WORKER_NODE: Dict[str, Callable] = { CloudwatchConfigType.AGENT.value: self._restart_cloudwatch_agent, CloudwatchConfigType.ALARM.value: self._put_cloudwatch_alarm, } def update_from_config(self, is_head_node: bool) -> None: """Discovers and applies CloudWatch config updates as required. Args: is_head_node: whether this node is the head node. """ for config_type in CloudwatchConfigType: if CloudwatchHelper.cloudwatch_config_exists( self.provider_config, config_type.value ): self._update_cloudwatch_config(config_type.value, is_head_node) def _ec2_health_check_waiter(self, node_id: str) -> None: # wait for all EC2 instance checks to complete try: logger.info( "Waiting for EC2 instance health checks to complete before " "configuring Unified Cloudwatch Agent. This may take a few " "minutes..." ) waiter = self.ec2_client.get_waiter("instance_status_ok") waiter.wait(InstanceIds=[node_id]) except botocore.exceptions.WaiterError as e: logger.error( "Failed while waiting for EC2 instance checks to complete: {}".format( e.message ) ) raise e def _update_cloudwatch_config(self, config_type: str, is_head_node: bool) -> None: """ check whether update operations are needed in cloudwatch related configs """ cwa_installed = self._setup_cwa() param_name = self._get_ssm_param_name(config_type) if cwa_installed: if is_head_node: cw_config_ssm = self._set_cloudwatch_ssm_config_param( param_name, config_type ) cur_cw_config_hash = self._sha1_hash_file(config_type) ssm_cw_config_hash = self._sha1_hash_json(cw_config_ssm) # check if user updated cloudwatch related config files. # if so, perform corresponding actions. 
if cur_cw_config_hash != ssm_cw_config_hash: logger.info( "Cloudwatch {} config file has changed.".format(config_type) ) self._upload_config_to_ssm_and_set_hash_tag(config_type) self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_FUNC_HEAD_NODE.get( config_type )() else: head_node_hash = self._get_head_node_config_hash(config_type) cur_node_hash = self._get_cur_node_config_hash(config_type) if head_node_hash != cur_node_hash: logger.info( "Cloudwatch {} config file has changed.".format(config_type) ) update_func = ( self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_FUNC_WORKER_NODE.get( config_type ) ) if update_func: update_func() self._update_cloudwatch_hash_tag_value( self.node_id, head_node_hash, config_type ) def _put_cloudwatch_dashboard(self) -> Dict[str, Any]: """put dashboard to cloudwatch console""" cloudwatch_config = self.provider_config["cloudwatch"] dashboard_config = cloudwatch_config.get("dashboard", {}) dashboard_name_cluster = dashboard_config.get("name", self.cluster_name) dashboard_name = self.cluster_name + "-" + dashboard_name_cluster widgets = self._replace_dashboard_config_vars( CloudwatchConfigType.DASHBOARD.value ) response = self.cloudwatch_client.put_dashboard( DashboardName=dashboard_name, DashboardBody=json.dumps({"widgets": widgets}) ) issue_count = len(response.get("DashboardValidationMessages", [])) if issue_count > 0: for issue in response.get("DashboardValidationMessages"): logging.error( "Error in dashboard config: {} - {}".format( issue["Message"], issue["DataPath"] ) ) raise Exception( "Errors in dashboard configuration: {} issues raised".format( issue_count ) ) else: logger.info("Successfully put dashboard to CloudWatch console") return response def _put_cloudwatch_alarm(self) -> None: """put CloudWatch metric alarms read from config""" param_name = self._get_ssm_param_name(CloudwatchConfigType.ALARM.value) data = json.loads(self._get_ssm_param(param_name)) for item in data: item_out = copy.deepcopy(item) self._replace_all_config_variables( item_out, self.node_id, self.cluster_name, self.provider_config["region"], ) self.cloudwatch_client.put_metric_alarm(**item_out) logger.info("Successfully put alarms to CloudWatch console") def _send_command_to_node( self, document_name: str, parameters: Dict[str, List[str]], node_id: str ) -> Dict[str, Any]: """send SSM command to the given nodes""" logger.debug( "Sending SSM command to {} node(s). Document name: {}. " "Parameters: {}.".format(node_id, document_name, parameters) ) response = self.ssm_client.send_command( InstanceIds=[node_id], DocumentName=document_name, Parameters=parameters, MaxConcurrency="1", MaxErrors="0", ) return response def _ssm_command_waiter( self, document_name: str, parameters: Dict[str, List[str]], node_id: str, retry_failed: bool = True, ) -> Dict[str, Any]: """wait for SSM command to complete on all cluster nodes""" # This waiter differs from the built-in SSM.Waiter by # optimistically waiting for the command invocation to # exist instead of failing immediately, and by resubmitting # any failed command until all retry attempts are exhausted # by default. 
response = self._send_command_to_node(document_name, parameters, node_id) command_id = response["Command"]["CommandId"] cloudwatch_config = self.provider_config["cloudwatch"] agent_retryer_config = cloudwatch_config.get( CloudwatchConfigType.AGENT.value ).get("retryer", {}) max_attempts = agent_retryer_config.get("max_attempts", 120) delay_seconds = agent_retryer_config.get("delay_seconds", 30) num_attempts = 0 cmd_invocation_res = {} while True: num_attempts += 1 logger.debug( "Listing SSM command ID {} invocations on node {}".format( command_id, node_id ) ) response = self.ssm_client.list_command_invocations( CommandId=command_id, InstanceId=node_id, ) cmd_invocations = response["CommandInvocations"] if not cmd_invocations: logger.debug( "SSM Command ID {} invocation does not exist. If " "the command was just started, it may take a " "few seconds to register.".format(command_id) ) else: if len(cmd_invocations) > 1: logger.warning( "Expected to find 1 SSM command invocation with " "ID {} on node {} but found {}: {}".format( command_id, node_id, len(cmd_invocations), cmd_invocations, ) ) cmd_invocation = cmd_invocations[0] if cmd_invocation["Status"] == "Success": logger.debug( "SSM Command ID {} completed successfully.".format(command_id) ) cmd_invocation_res[node_id] = True break if num_attempts >= max_attempts: logger.error( "Max attempts for command {} exceeded on node {}".format( command_id, node_id ) ) raise botocore.exceptions.WaiterError( name="ssm_waiter", reason="Max attempts exceeded", last_response=cmd_invocation, ) if cmd_invocation["Status"] == "Failed": logger.debug(f"SSM Command ID {command_id} failed.") if retry_failed: logger.debug(f"Retrying in {delay_seconds} seconds.") response = self._send_command_to_node( document_name, parameters, node_id ) command_id = response["Command"]["CommandId"] logger.debug( "Sent SSM command ID {} to node {}".format( command_id, node_id ) ) else: logger.debug(f"Ignoring Command ID {command_id} failure.") cmd_invocation_res[node_id] = False break time.sleep(delay_seconds) return cmd_invocation_res def _replace_config_variables( self, string: str, node_id: str, cluster_name: str, region: str ) -> str: """ replace known config variable occurrences in the input string does not replace variables with undefined or empty strings """ if node_id: string = string.replace("{instance_id}", node_id) if cluster_name: string = string.replace("{cluster_name}", cluster_name) if region: string = string.replace("{region}", region) return string def _replace_all_config_variables( self, collection: Union[Dict[str, Any], str], node_id: str, cluster_name: str, region: str, ) -> Union[str, Dict[str, Any]]: """ Replace known config variable occurrences in the input collection. The input collection must be either a dict or list. Returns a tuple consisting of the output collection and the number of modified strings in the collection (which is not necessarily equal to the number of variables replaced). """ for key in collection: if type(collection) is dict: value = collection.get(key) index_key = key elif type(collection) is list: value = key index_key = collection.index(key) else: raise ValueError( f"Can't replace CloudWatch config variables " f"in unsupported collection type: {type(collection)}." f"Please check your CloudWatch JSON config files." 
) if type(value) is str: collection[index_key] = self._replace_config_variables( value, node_id, cluster_name, region ) elif type(value) is dict or type(value) is list: collection[index_key] = self._replace_all_config_variables( value, node_id, cluster_name, region ) return collection def _load_config_file(self, config_type: str) -> Dict[str, Any]: """load JSON config file""" cloudwatch_config = self.provider_config["cloudwatch"] json_config_file_section = cloudwatch_config.get(config_type, {}) json_config_file_path = json_config_file_section.get("config", {}) json_config_path = os.path.abspath(json_config_file_path) with open(json_config_path) as f: data = json.load(f) return data def _set_cloudwatch_ssm_config_param( self, parameter_name: str, config_type: str ) -> str: """ get cloudwatch config for the given param and config type from SSM if it exists, put it in the SSM param store if not """ try: parameter_value = self._get_ssm_param(parameter_name) except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] == "ParameterNotFound": logger.info( "Cloudwatch {} config file is not found " "at SSM parameter store. " "Checking for Unified CloudWatch Agent installation".format( config_type ) ) return self._get_default_empty_config_file_hash() else: logger.info( "Failed to fetch Unified CloudWatch Agent config from SSM " "parameter store." ) logger.error(e) raise e return parameter_value def _get_default_empty_config_file_hash(self): default_cw_config = "{}" parameter_value = self._sha1_hash_json(default_cw_config) return parameter_value def _get_ssm_param(self, parameter_name: str) -> str: """ get the SSM parameter value associated with the given parameter name """ response = self.ssm_client.get_parameter(Name=parameter_name) logger.info("Successfully fetch ssm parameter: {}".format(parameter_name)) res = response.get("Parameter", {}) cwa_parameter = res.get("Value", {}) return cwa_parameter def _sha1_hash_json(self, value: str) -> str: """calculate the json string sha1 hash""" sha1_hash = hashlib.new("sha1") binary_value = value.encode("ascii") sha1_hash.update(binary_value) sha1_res = sha1_hash.hexdigest() return sha1_res def _sha1_hash_file(self, config_type: str) -> str: """calculate the config file sha1 hash""" config = self.CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC.get( config_type )(config_type) value = json.dumps(config) sha1_res = self._sha1_hash_json(value) return sha1_res def _upload_config_to_ssm_and_set_hash_tag(self, config_type: str): data = self.CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC.get( config_type )(config_type) sha1_hash_value = self._sha1_hash_file(config_type) self._upload_config_to_ssm(data, config_type) self._update_cloudwatch_hash_tag_value( self.node_id, sha1_hash_value, config_type ) def _add_cwa_installed_tag(self, node_id: str) -> None: self.ec2_client.create_tags( Resources=[node_id], Tags=[{"Key": CLOUDWATCH_AGENT_INSTALLED_TAG, "Value": "True"}], ) logger.info( "Successfully add Unified CloudWatch Agent installed " "tag on {}".format(node_id) ) def _update_cloudwatch_hash_tag_value( self, node_id: str, sha1_hash_value: str, config_type: str ): hash_key_value = "-".join([CLOUDWATCH_CONFIG_HASH_TAG_BASE, config_type]) self.ec2_client.create_tags( Resources=[node_id], Tags=[{"Key": hash_key_value, "Value": sha1_hash_value}], ) logger.info( "Successfully update cloudwatch {} hash tag on {}".format( config_type, node_id ) ) def _get_ssm_param_name(self, config_type: str) -> str: """return the parameter name for 
cloudwatch configs""" ssm_config_param_name = "AmazonCloudWatch-" + "ray_{}_config_{}".format( config_type, self.cluster_name ) return ssm_config_param_name def _put_ssm_param(self, parameter: Dict[str, Any], parameter_name: str) -> None: """upload cloudwatch config to the SSM parameter store""" self.ssm_client.put_parameter( Name=parameter_name, Type="String", Value=json.dumps(parameter), Overwrite=True, Tier="Intelligent-Tiering", ) def _upload_config_to_ssm(self, param: Dict[str, Any], config_type: str): param_name = self._get_ssm_param_name(config_type) self._put_ssm_param(param, param_name) def _replace_cwa_config_vars(self, config_type: str) -> Dict[str, Any]: """ replace {instance_id}, {region}, {cluster_name} variable occurrences in Unified Cloudwatch Agent config file """ cwa_config = self._load_config_file(config_type) self._replace_all_config_variables( cwa_config, self.node_id, self.cluster_name, self.provider_config["region"], ) return cwa_config def _replace_dashboard_config_vars(self, config_type: str) -> List[str]: """ replace known variable occurrences in CloudWatch Dashboard config file """ data = self._load_config_file(config_type) widgets = [] for item in data: item_out = self._replace_all_config_variables( item, self.node_id, self.cluster_name, self.provider_config["region"], ) widgets.append(item_out) return widgets def _replace_alarm_config_vars(self, config_type: str) -> List[str]: """ replace {instance_id}, {region}, {cluster_name} variable occurrences in cloudwatch alarm config file """ data = self._load_config_file(config_type) param_data = [] for item in data: item_out = copy.deepcopy(item) self._replace_all_config_variables( item_out, self.node_id, self.cluster_name, self.provider_config["region"], ) param_data.append(item_out) return param_data def _restart_cloudwatch_agent(self) -> None: """restart Unified CloudWatch Agent""" cwa_param_name = self._get_ssm_param_name(CloudwatchConfigType.AGENT.value) logger.info( "Restarting Unified CloudWatch Agent package on node {}.".format( self.node_id ) ) self._stop_cloudwatch_agent() self._start_cloudwatch_agent(cwa_param_name) def _stop_cloudwatch_agent(self) -> None: """stop Unified CloudWatch Agent""" logger.info( "Stopping Unified CloudWatch Agent package on node {}.".format(self.node_id) ) parameters_stop_cwa = { "action": ["stop"], "mode": ["ec2"], } # don't retry failed stop commands # (there's not always an agent to stop) self._ssm_command_waiter( "AmazonCloudWatch-ManageAgent", parameters_stop_cwa, self.node_id, False, ) logger.info("Unified CloudWatch Agent stopped on node {}.".format(self.node_id)) def _start_cloudwatch_agent(self, cwa_param_name: str) -> None: """start Unified CloudWatch Agent""" logger.info( "Starting Unified CloudWatch Agent package on node {}.".format(self.node_id) ) parameters_start_cwa = { "action": ["configure"], "mode": ["ec2"], "optionalConfigurationSource": ["ssm"], "optionalConfigurationLocation": [cwa_param_name], "optionalRestart": ["yes"], } self._ssm_command_waiter( "AmazonCloudWatch-ManageAgent", parameters_start_cwa, self.node_id ) logger.info( "Unified CloudWatch Agent started successfully on node {}.".format( self.node_id ) ) def _setup_cwa(self) -> bool: cwa_installed = self._check_cwa_installed_ec2_tag() if cwa_installed == "False": res_cwa_installed = self._ensure_cwa_installed_ssm(self.node_id) return res_cwa_installed else: return True def _get_head_node_config_hash(self, config_type: str) -> str: hash_key_value = "-".join([CLOUDWATCH_CONFIG_HASH_TAG_BASE, 
config_type]) filters = copy.deepcopy( self._get_current_cluster_session_nodes(self.cluster_name) ) filters.append( { "Name": "tag:{}".format(TAG_RAY_NODE_KIND), "Values": [NODE_KIND_HEAD], } ) try: instance = list(self.ec2_resource.instances.filter(Filters=filters)) assert len(instance) == 1, "More than 1 head node found!" for tag in instance[0].tags: if tag["Key"] == hash_key_value: return tag["Value"] except botocore.exceptions.ClientError as e: logger.warning( "{} Error caught when getting value of {} tag on head node".format( e.response["Error"], hash_key_value ) ) def _get_cur_node_config_hash(self, config_type: str) -> str: hash_key_value = "-".join([CLOUDWATCH_CONFIG_HASH_TAG_BASE, config_type]) try: response = self.ec2_client.describe_instances(InstanceIds=[self.node_id]) reservations = response["Reservations"] message = "More than 1 response received from describing current node" assert len(reservations) == 1, message instances = reservations[0]["Instances"] assert len(reservations) == 1, message tags = instances[0]["Tags"] hash_value = self._get_default_empty_config_file_hash() for tag in tags: if tag["Key"] == hash_key_value: logger.info( "Successfully get cloudwatch {} hash tag value from " "node {}".format(config_type, self.node_id) ) hash_value = tag["Value"] return hash_value except botocore.exceptions.ClientError as e: logger.warning( "{} Error caught when getting hash tag {} tag".format( e.response["Error"], hash_key_value ) ) def _ensure_cwa_installed_ssm(self, node_id: str) -> bool: """ Check if Unified Cloudwatch Agent is installed via ssm run command. If not, notify user to use an AMI with the Unified CloudWatch Agent installed. """ logger.info( "Checking Unified Cloudwatch Agent status on node {}".format(node_id) ) parameters_status_cwa = { "action": ["status"], "mode": ["ec2"], } self._ec2_health_check_waiter(node_id) cmd_invocation_res = self._ssm_command_waiter( "AmazonCloudWatch-ManageAgent", parameters_status_cwa, node_id, False ) cwa_installed = cmd_invocation_res.get(node_id, False) if not cwa_installed: logger.warning( "Unified CloudWatch Agent not installed on {}. " "Ray logs, metrics not picked up. 
" "Please use an AMI with Unified CloudWatch Agent installed.".format( node_id ) ) return False else: return True def _get_current_cluster_session_nodes(self, cluster_name: str) -> List[dict]: filters = [ { "Name": "instance-state-name", "Values": ["pending", "running"], }, { "Name": "tag:{}".format(TAG_RAY_CLUSTER_NAME), "Values": [cluster_name], }, ] return filters def _check_cwa_installed_ec2_tag(self) -> List[str]: """ Filtering all nodes to get nodes without Unified CloudWatch Agent installed """ try: response = self.ec2_client.describe_instances(InstanceIds=[self.node_id]) reservations = response["Reservations"] message = "More than 1 response received from describing current node" assert len(reservations) == 1, message instances = reservations[0]["Instances"] assert len(instances) == 1, message tags = instances[0]["Tags"] cwa_installed = str(False) for tag in tags: if tag["Key"] == CLOUDWATCH_AGENT_INSTALLED_TAG: logger.info( "Unified CloudWatch Agent is installed on " "node {}".format(self.node_id) ) cwa_installed = tag["Value"] return cwa_installed except botocore.exceptions.ClientError as e: logger.warning( "{} Error caught when getting Unified CloudWatch Agent " "status based on {} tag".format( e.response["Error"], CLOUDWATCH_AGENT_INSTALLED_TAG ) ) @staticmethod def resolve_instance_profile_name( config: Dict[str, Any], default_instance_profile_name: str ) -> str: """Get default cloudwatch instance profile name. Args: config: provider section of cluster config file. default_instance_profile_name: default ray instance profile name. Returns: default cloudwatch instance profile name if cloudwatch config file exists. default ray instance profile name if cloudwatch config file doesn't exist. """ cwa_cfg_exists = CloudwatchHelper.cloudwatch_config_exists( config, CloudwatchConfigType.AGENT.value ) return ( CLOUDWATCH_RAY_INSTANCE_PROFILE if cwa_cfg_exists else default_instance_profile_name ) @staticmethod def resolve_iam_role_name( config: Dict[str, Any], default_iam_role_name: str ) -> str: """Get default cloudwatch iam role name. Args: config: provider section of cluster config file. default_iam_role_name: default ray iam role name. Returns: default cloudwatch iam role name if cloudwatch config file exists. default ray iam role name if cloudwatch config file doesn't exist. """ cwa_cfg_exists = CloudwatchHelper.cloudwatch_config_exists( config, CloudwatchConfigType.AGENT.value ) return CLOUDWATCH_RAY_IAM_ROLE if cwa_cfg_exists else default_iam_role_name @staticmethod def resolve_policy_arns( config: Dict[str, Any], iam: Any, default_policy_arns: List[str] ) -> List[str]: """Attach necessary AWS policies for CloudWatch related operations. Args: config: provider section of cluster config file. iam: AWS iam resource. default_policy_arns: List of default ray AWS policies. Returns: list of policy arns including additional policies for CloudWatch related operations if cloudwatch agent config is specifed in cluster config file. 
""" cwa_cfg_exists = CloudwatchHelper.cloudwatch_config_exists( config, CloudwatchConfigType.AGENT.value ) if cwa_cfg_exists: cloudwatch_managed_policy = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ssm:SendCommand", "ssm:ListCommandInvocations", "iam:PassRole", ], "Resource": "*", } ], } iam_client = iam.meta.client iam_client.create_policy( PolicyName="CloudwatchManagedPolicies", PolicyDocument=json.dumps(cloudwatch_managed_policy), ) sts_client = client_cache("sts", config["region"]) account_id = sts_client.get_caller_identity().get("Account") managed_policy_arn = ( "arn:aws:iam::{}:policy/CloudwatchManagedPolicies".format(account_id) ) policy_waiter = iam_client.get_waiter("policy_exists") policy_waiter.wait( PolicyArn=managed_policy_arn, WaiterConfig={"Delay": 2, "MaxAttempts": 200}, ) new_policy_arns = copy.copy(default_policy_arns) new_policy_arns.extend( [ "arn:aws:iam::aws:policy/CloudWatchAgentAdminPolicy", "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore", managed_policy_arn, ] ) return new_policy_arns else: return default_policy_arns @staticmethod def cloudwatch_config_exists(config: Dict[str, Any], config_type: str) -> bool: """Check if CloudWatch configuration was specified by the user in their cluster config file. Specifically, this function checks if a CloudWatch config file is specified by the user in their cluster config file. Args: config: provider section of cluster config file. config_type: type of CloudWatch config file. Returns: True if config file is specified by user. False if config file is not specified. """ cfg = config.get("cloudwatch", {}).get(config_type, {}).get("config") return bool(cfg)
CloudwatchHelper
python
doocs__leetcode
lcci/16.14.Best Line/Solution.py
{ "start": 0, "end": 591 }
class ____:
    def bestLine(self, points: List[List[int]]) -> List[int]:
        n = len(points)
        mx = 0
        for i in range(n):
            x1, y1 = points[i]
            for j in range(i + 1, n):
                x2, y2 = points[j]
                cnt = 2
                for k in range(j + 1, n):
                    x3, y3 = points[k]
                    a = (y2 - y1) * (x3 - x1)
                    b = (y3 - y1) * (x2 - x1)
                    cnt += a == b
                if mx < cnt:
                    mx = cnt
                    x, y = i, j
        return [x, y]
Solution
python
py-pdf__pypdf
pypdf/constants.py
{ "start": 10577, "end": 10779 }
class ____:
    Fields = "/Fields"
    NeedAppearances = "/NeedAppearances"
    SigFlags = "/SigFlags"
    CO = "/CO"
    DR = "/DR"
    DA = "/DA"
    Q = "/Q"
    XFA = "/XFA"
InteractiveFormDictEntries
python
dagster-io__dagster
python_modules/dagster/dagster/_utils/concurrency.py
{ "start": 1778, "end": 1845 }
class ____:
    run_id: str
    step_key: str


@record
ClaimedSlotInfo
python
PyCQA__pylint
tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py
{ "start": 1308, "end": 1447 }
class ____:
    """Potential uninferable return value"""

    def __getnewargs__(self):
        return tuple(Missing)
AnotherAmbiguousGetNewArgs
python
apache__airflow
airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py
{ "start": 8940, "end": 9334 }
class ____(StrictBaseModel):
    """Schema for AssetEvent model used in DagRun."""

    asset: AssetReferenceAssetEventDagRun
    extra: dict[str, JsonValue]
    source_task_id: str | None
    source_dag_id: str | None
    source_run_id: str | None
    source_map_index: int | None
    source_aliases: list[AssetAliasReferenceAssetEventDagRun]
    timestamp: UtcDateTime
AssetEventDagRunReference
python
dagster-io__dagster
python_modules/dagster/dagster/_config/pythonic_config/resource.py
{ "start": 5912, "end": 24202 }
class ____( Config, TypecheckAllowPartialResourceInitParams, Generic[TResValue], ABC, metaclass=BaseResourceMeta, ): """Base class for creating and managing the lifecycle of Dagster resources that utilize structured config. Users should directly inherit from this class when they want the object passed to user-defined code (such as an asset or op) to be different than the object that defines the configuration schema and is passed to the :py:class:`Definitions` object. Cases where this is useful include is when the object passed to user code is: * An existing class from a third-party library that the user does not control. * A complex class that requires substantial internal state management or itself requires arguments beyond its config values. * A class with expensive initialization that should not be invoked on project load, but rather lazily on first use in an op or asset during a run. * A class that you desire to be a plain Python class, rather than a Pydantic class, for whatever reason. This class is a subclass of both :py:class:`ResourceDefinition` and :py:class:`Config`, and must implement ``create_resource``, which creates the resource to pass to user code. Example definition: .. code-block:: python class DatabaseResource(ConfigurableResourceFactory[Database]): connection_uri: str def create_resource(self, _init_context) -> Database: # For example Database could be from a third-party library or require expensive setup. # Or you could just prefer to separate the concerns of configuration and runtime representation return Database(self.connection_uri) To use a resource created by a factory in a job, you must use the Resource type annotation. Example usage: .. code-block:: python @asset def asset_that_uses_database(database: ResourceParam[Database]): # Database used directly in user code database.query("SELECT * FROM table") defs = Definitions( assets=[asset_that_uses_database], resources={"database": DatabaseResource(connection_uri="some_uri")}, ) """ def __init__(self, **data: Any): resource_pointers, data_without_resources = separate_resource_params(self.__class__, data) schema = infer_schema_from_config_class( self.__class__, fields_to_omit=set(resource_pointers.keys()) ) # Populate config values super().__init__(**data_without_resources, **resource_pointers) # We pull the values from the Pydantic config object, which may cast values # to the correct type under the hood - useful in particular for enums casted_data_without_resources = { k: v for k, v in self._convert_to_config_dictionary().items() if k in data_without_resources } resolved_config_dict = config_dictionary_from_values(casted_data_without_resources, schema) self._state__internal__ = ConfigurableResourceFactoryState( # We keep track of any resources we depend on which are not fully configured # so that we can retrieve them at runtime nested_partial_resources={ k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v)) }, resolved_config_dict=resolved_config_dict, # These are unfortunately named very similarily config_schema=_curry_config_schema(schema, resolved_config_dict), schema=schema, nested_resources={k: v for k, v in resource_pointers.items()}, resource_context=None, ) @property def _schema(self): return self._state__internal__.schema @property def _config_schema(self): return self._state__internal__.config_schema @property def _nested_partial_resources(self): return self._state__internal__.nested_partial_resources @property def _nested_resources(self): return 
self._state__internal__.nested_resources @property def _resolved_config_dict(self): return self._state__internal__.resolved_config_dict @classmethod def _is_dagster_maintained(cls) -> bool: """This should be overridden to return True by all dagster maintained resources and IO managers.""" return False @classmethod def _is_cm_resource_cls(cls: type["ConfigurableResourceFactory"]) -> bool: return ( cls.yield_for_execution != ConfigurableResourceFactory.yield_for_execution or cls.teardown_after_execution != ConfigurableResourceFactory.teardown_after_execution # We assume that any resource which has nested resources needs to be treated as a # context manager resource, since its nested resources may be context managers # and need setup and teardown logic or len(_get_resource_param_fields(cls)) > 0 ) @property def _is_cm_resource(self) -> bool: return self.__class__._is_cm_resource_cls() # noqa: SLF001 def _get_initialize_and_run_fn(self) -> Callable: return self._initialize_and_run_cm if self._is_cm_resource else self._initialize_and_run @cached_method # resource resolution depends on always resolving to the same ResourceDefinition instance def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition: return ConfigurableResourceFactoryResourceDefinition( self.__class__, resource_fn=self._get_initialize_and_run_fn(), config_schema=self._config_schema, description=self.__doc__, nested_resources=self.nested_resources, nested_partial_resources=self._nested_partial_resources, dagster_maintained=self._is_dagster_maintained(), ) @abstractmethod def create_resource(self, context: InitResourceContext) -> TResValue: """Returns the object that this resource hands to user code, accessible by ops or assets through the context or resource parameters. This works like the function decorated with @resource when using function-based resources. """ raise NotImplementedError() @property def nested_resources( self, ) -> Mapping[str, Any]: return self._nested_resources @classmethod def configure_at_launch(cls: "type[T_Self]", **kwargs) -> "PartialResource[T_Self]": """Returns a partially initialized copy of the resource, with remaining config fields set at runtime. """ return PartialResource(cls, data=kwargs) def _with_updated_values( self, values: Optional[Mapping[str, Any]] ) -> "ConfigurableResourceFactory[TResValue]": """Returns a new instance of the resource with the given values. Used when initializing a resource at runtime. """ values = check.opt_mapping_param(values, "values", key_type=str) # Since Resource extends BaseModel and is a dataclass, we know that the # signature of any __init__ method will always consist of the fields # of this class. We can therefore safely pass in the values as kwargs. to_populate = self.__class__._get_non_default_public_field_values_cls( # noqa: SLF001 {**self._get_non_default_public_field_values(), **values} ) out = self.__class__(**to_populate) out._state__internal__ = out._state__internal__._replace( # noqa: SLF001 resource_context=self._state__internal__.resource_context ) return out @contextlib.contextmanager def _resolve_and_update_nested_resources( self, context: InitResourceContext ) -> Generator["ConfigurableResourceFactory[TResValue]", None, None]: """Updates any nested resources with the resource values from the context. In this case, populating partially configured resources or resources that return plain Python types. Returns a new instance of the resource. 
""" from dagster._core.execution.build_resources import wrap_resource_for_execution partial_resources_to_update: dict[str, Any] = {} if self._nested_partial_resources: for attr_name, resource in self._nested_partial_resources.items(): key = _resolve_partial_resource_to_key( attr_name, resource, context.all_resource_defs ) resolved_resource = getattr(context.resources, key) partial_resources_to_update[attr_name] = resolved_resource # Also evaluate any resources that are not partial with contextlib.ExitStack() as stack: resources_to_update, _ = separate_resource_params(self.__class__, self.__dict__) resources_to_update = { attr_name: _call_resource_fn_with_default( stack, wrap_resource_for_execution(resource), context ) for attr_name, resource in resources_to_update.items() if attr_name not in partial_resources_to_update } to_update = {**resources_to_update, **partial_resources_to_update} yield self._with_updated_values(to_update) @deprecated( breaking_version="2.0", additional_warn_text="Use `with_replaced_resource_context` instead" ) def with_resource_context( self, resource_context: InitResourceContext ) -> "ConfigurableResourceFactory[TResValue]": return self.with_replaced_resource_context(resource_context) def with_replaced_resource_context( self, resource_context: InitResourceContext ) -> "ConfigurableResourceFactory[TResValue]": """Returns a new instance of the resource with the given resource init context bound.""" # This utility is used to create a copy of this resource, without adjusting # any values in this case copy = self._with_updated_values({}) copy._state__internal__ = copy._state__internal__._replace( # noqa: SLF001 resource_context=resource_context ) return copy def _initialize_and_run(self, context: InitResourceContext) -> TResValue: with self._resolve_and_update_nested_resources(context) as has_nested_resource: updated_resource = has_nested_resource.with_replaced_resource_context( # noqa: SLF001 context )._with_updated_values(context.resource_config) updated_resource.setup_for_execution(context) return updated_resource.create_resource(context) @contextlib.contextmanager def _async_to_sync_cm( self, context: InitResourceContext, async_cm: contextlib.AbstractAsyncContextManager, ): aio_exit_stack = contextlib.AsyncExitStack() loop = context.event_loop if loop is None: raise DagsterError( "Unable to handle resource with async def yield_for_execution in the current context. " "If using direct execution utilities like build_context, pass an event loop in and use " "the same event loop to execute your asset/op." ) try: value = loop.run_until_complete(aio_exit_stack.enter_async_context(async_cm)) yield value finally: loop.run_until_complete(aio_exit_stack.aclose()) @contextlib.contextmanager def _initialize_and_run_cm( self, context: InitResourceContext, ) -> Generator[TResValue, None, None]: with self._resolve_and_update_nested_resources(context) as has_nested_resource: updated_resource = has_nested_resource.with_replaced_resource_context( # noqa: SLF001 context )._with_updated_values(context.resource_config) resource_cm = updated_resource.yield_for_execution(context) if isinstance(resource_cm, contextlib.AbstractAsyncContextManager): resource_cm = self._async_to_sync_cm(context, resource_cm) with resource_cm as value: yield value def setup_for_execution(self, context: InitResourceContext) -> None: """Optionally override this method to perform any pre-execution steps needed before the resource is used in execution. 
""" pass def teardown_after_execution(self, context: InitResourceContext) -> None: """Optionally override this method to perform any post-execution steps needed after the resource is used in execution. teardown_after_execution will be called even if any part of the run fails. It will not be called if setup_for_execution fails. """ pass @contextlib.contextmanager def yield_for_execution(self, context: InitResourceContext) -> Generator[TResValue, None, None]: """Optionally override this method to perform any lifecycle steps before or after the resource is used in execution. By default, calls setup_for_execution before yielding, and teardown_after_execution after yielding. Note that if you override this method and want setup_for_execution or teardown_after_execution to be called, you must invoke them yourself. """ self.setup_for_execution(context) try: yield self.create_resource(context) finally: self.teardown_after_execution(context) def get_resource_context(self) -> InitResourceContext: """Returns the context that this resource was initialized with.""" return check.not_none( self._state__internal__.resource_context, additional_message="Attempted to get context before resource was initialized.", ) def process_config_and_initialize(self) -> TResValue: """Initializes this resource, fully processing its config and returning the prepared resource value. """ from dagster._config.post_process import post_process_config post_processed_config = post_process_config( self._config_schema.config_type, # pyright: ignore[reportArgumentType] self._convert_to_config_dictionary(), ) if not post_processed_config.success: raise DagsterInvalidConfigError( "Errors while initializing resource", post_processed_config.errors, post_processed_config, ) return self.from_resource_context( build_init_resource_context(config=post_processed_config.value), nested_resources=self.nested_resources, ) @contextlib.contextmanager def process_config_and_initialize_cm(self) -> Generator[TResValue, None, None]: """Context which initializes this resource, fully processing its config and yielding the prepared resource value. """ from dagster._config.post_process import post_process_config post_processed_config = post_process_config( self._config_schema.config_type, # pyright: ignore[reportArgumentType] self._convert_to_config_dictionary(), ) if not post_processed_config.success: raise DagsterInvalidConfigError( "Errors while initializing resource", post_processed_config.errors, post_processed_config, ) with ( build_init_resource_context(config=post_processed_config.value) as context, self.from_resource_context_cm( context, nested_resources=self.nested_resources, ) as out, ): yield out @classmethod def from_resource_context( cls, context: InitResourceContext, nested_resources: Optional[Mapping[str, Any]] = None ) -> TResValue: """Creates a new instance of this resource from a populated InitResourceContext. Useful when creating a resource from a function-based resource, for backwards compatibility purposes. For resources that have custom teardown behavior, use from_resource_context_cm instead. Example usage: .. code-block:: python class MyResource(ConfigurableResource): my_str: str @resource(config_schema=MyResource.to_config_schema()) def my_resource(context: InitResourceContext) -> MyResource: return MyResource.from_resource_context(context) """ check.invariant( not cls._is_cm_resource_cls(), "Use from_resource_context_cm for resources which have custom teardown behavior," " e.g. 
overriding yield_for_execution or teardown_after_execution", ) return cls( # noqa: SLF001 **{**(context.resource_config or {}), **(nested_resources or {})} )._initialize_and_run(context) @classmethod @contextlib.contextmanager def from_resource_context_cm( cls, context: InitResourceContext, nested_resources: Optional[Mapping[str, Any]] = None ) -> Generator[TResValue, None, None]: """Context which generates a new instance of this resource from a populated InitResourceContext. Useful when creating a resource from a function-based resource, for backwards compatibility purposes. Handles custom teardown behavior. Example usage: .. code-block:: python class MyResource(ConfigurableResource): my_str: str @resource(config_schema=MyResource.to_config_schema()) def my_resource(context: InitResourceContext) -> Generator[MyResource, None, None]: with MyResource.from_resource_context_cm(context) as my_resource: yield my_resource """ with cls( # noqa: SLF001 **{**(context.resource_config or {}), **(nested_resources or {})} )._initialize_and_run_cm(context) as value: yield value @public
ConfigurableResourceFactory
python
huggingface__transformers
src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
{ "start": 2292, "end": 4045 }
class ____(ModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder.
    decoder_coords (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        The predicted coordinates of the objects.
    decoder_classes (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`):
        The predicted classes of the objects.
    encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        The predicted coordinates of the objects from the encoder.
    encoder_class_logits (`tuple[torch.FloatTensor]` of shape `(batch_size, num_queries, num_classes)`):
        The predicted class of the objects from the encoder.
    init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        The initial reference points.
    intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`):
        The intermediate reference points.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
    decoder_coords: Optional[torch.FloatTensor] = None
    decoder_classes: Optional[torch.FloatTensor] = None
    encoder_coord_logits: Optional[torch.FloatTensor] = None
    encoder_class_logits: Optional[tuple[torch.FloatTensor]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    intermediate_reference_points: tuple[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`OmDetTurboObjectDetectionOutput`].
    """
)
OmDetTurboDecoderOutput
python
tensorflow__tensorflow
tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
{ "start": 17134, "end": 20581 }
class ____(checkpoint_test_base.CheckpointTestBase, parameterized.TestCase): @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine( drop_remainder=[True, False], symbolic_checkpoint=[True, False]))) def test(self, verify_fn, drop_remainder, symbolic_checkpoint): range_size = 11 num_shards = 3 num_repeats = 2 batch_size = 5 num_parallel_calls = 7 total_outputs = (range_size // num_shards) * num_repeats if drop_remainder: num_outputs = total_outputs // batch_size else: num_outputs = int(math.ceil(total_outputs / batch_size)) def build_ds(range_start, drop_remainder=False, symbolic_checkpoint=False): def _map_fn(x): return math_ops.square(x) dataset = dataset_ops.Dataset.range( range_start, range_start + range_size) dataset = dataset.shard(num_shards=num_shards, index=0) dataset = dataset.repeat(num_repeats) dataset = dataset.apply( batching.map_and_batch( map_func=_map_fn, batch_size=batch_size, num_parallel_calls=num_parallel_calls, drop_remainder=drop_remainder)) options = options_lib.Options() options.experimental_symbolic_checkpoint = symbolic_checkpoint return dataset.with_options(options) verify_fn( self, lambda: build_ds( 10, drop_remainder=drop_remainder, symbolic_checkpoint=symbolic_checkpoint), num_outputs) @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(drop_remainder=[True, False]))) def testNumParallelBatches(self, verify_fn, drop_remainder): range_size = 11 num_shards = 3 num_repeats = 2 batch_size = 5 num_parallel_batches = 2 total_outputs = (range_size // num_shards) * num_repeats if drop_remainder: num_outputs = total_outputs // batch_size else: num_outputs = int(math.ceil(total_outputs / batch_size)) def build_ds(range_start, drop_remainder): def _map_fn(x): return math_ops.square(x) return dataset_ops.Dataset.range( range_start, range_start + range_size).shard( num_shards=num_shards, index=0).repeat(num_repeats).apply( batching.map_and_batch( map_func=_map_fn, batch_size=batch_size, num_parallel_batches=num_parallel_batches, drop_remainder=drop_remainder)) verify_fn(self, lambda: build_ds(10, drop_remainder=drop_remainder), num_outputs) @combinations.generate( combinations.times(test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations())) def testSparse(self, verify_fn): def build_dataset(): def map_fn(i): return sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) return dataset_ops.Dataset.range(10).apply( batching.map_and_batch(map_fn, 5)) verify_fn(self, build_dataset, num_outputs=2) if __name__ == "__main__": test.main()
MapAndBatchCheckpointTest
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/grid.py
{ "start": 1226, "end": 1389 }
class ____(BaseModel):
    """DAG Run model for the Grid UI."""

    run_id: str
    dag_id: str
    task_instances: list[LightGridTaskInstanceSummary]
GridTISummaries
python
python-openxml__python-docx
tests/parts/test_settings.py
{ "start": 420, "end": 2540 }
class ____:
    def it_is_used_by_loader_to_construct_settings_part(self, load_, package_, settings_part_):
        partname, blob = "partname", "blob"
        content_type = CT.WML_SETTINGS
        load_.return_value = settings_part_

        part = PartFactory(partname, content_type, None, blob, package_)

        load_.assert_called_once_with(partname, content_type, blob, package_)
        assert part is settings_part_

    def it_provides_access_to_its_settings(self, settings_fixture):
        settings_part, Settings_, settings_ = settings_fixture
        settings = settings_part.settings
        Settings_.assert_called_once_with(settings_part.element)
        assert settings is settings_

    def it_constructs_a_default_settings_part_to_help(self):
        package = OpcPackage()
        settings_part = SettingsPart.default(package)
        assert isinstance(settings_part, SettingsPart)
        assert settings_part.partname == "/word/settings.xml"
        assert settings_part.content_type == CT.WML_SETTINGS
        assert settings_part.package is package
        assert len(settings_part.element) == 6

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def settings_fixture(self, Settings_, settings_):
        settings_elm = element("w:settings")
        settings_part = SettingsPart(None, None, settings_elm, None)
        return settings_part, Settings_, settings_

    # fixture components ---------------------------------------------

    @pytest.fixture
    def load_(self, request):
        return method_mock(request, SettingsPart, "load", autospec=False)

    @pytest.fixture
    def package_(self, request):
        return instance_mock(request, Package)

    @pytest.fixture
    def Settings_(self, request, settings_):
        return class_mock(request, "docx.parts.settings.Settings", return_value=settings_)

    @pytest.fixture
    def settings_(self, request):
        return instance_mock(request, Settings)

    @pytest.fixture
    def settings_part_(self, request):
        return instance_mock(request, SettingsPart)
DescribeSettingsPart
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1002292, "end": 1002847 }
class ____(sgqlc.types.Type):
    """Represents a team repository."""

    __schema__ = github_schema
    __field_names__ = ("cursor", "node", "permission")

    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""

    node = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="node")

    permission = sgqlc.types.Field(sgqlc.types.non_null(RepositoryPermission), graphql_name="permission")
    """The permission level the team has on the repository"""
TeamRepositoryEdge
python
sympy__sympy
sympy/assumptions/predicates/order.py
{ "start": 6646, "end": 7211 }
class ____(Predicate):
    r"""
    Positive extended real number predicate.

    Explanation
    ===========

    ``Q.extended_positive(x)`` is true iff ``x`` is extended real and
    `x > 0`, that is if ``x`` is in the interval `(0, \infty]`.

    Examples
    ========

    >>> from sympy import ask, I, oo, Q
    >>> ask(Q.extended_positive(1))
    True
    >>> ask(Q.extended_positive(oo))
    True
    >>> ask(Q.extended_positive(I))
    False

    """
    name = 'extended_positive'
    handler = Dispatcher("ExtendedPositiveHandler")
ExtendedPositivePredicate
python
airbytehq__airbyte
airbyte-integrations/connectors/source-genesys/source_genesys/source.py
{ "start": 2659, "end": 2878 }
class ____(GenesysStream):
    """
    API Docs: https://developer.genesys.cloud/telephony/locations-apis
    """

    primary_key = "id"

    def path(self, **kwargs) -> str:
        return "locations"
TelephonyLocations
python
tensorflow__tensorflow
tensorflow/python/ops/image_ops_test.py
{ "start": 205222, "end": 208413 }
class ____(test_util.TensorFlowTestCase): # NOTE(b/142795960): parameterized tests do not work well with tf.tensor # inputs. Due to failures, creating another test `testInvalidTensorInput` # which is identical to this one except that the input here is a scalar as # opposed to a tensor. def testInvalidPyInput(self): boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]] scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]] max_output_size_per_class = 5 max_total_size = 2**31 with self.assertRaisesRegex( (TypeError, ValueError), "type int64 that does not match expected type of int32|" "Tensor conversion requested dtype int32 for Tensor with dtype int64"): image_ops.combined_non_max_suppression( boxes=boxes_np, scores=scores_np, max_output_size_per_class=max_output_size_per_class, max_total_size=max_total_size) # NOTE(b/142795960): parameterized tests do not work well with tf.tensor # inputs. Due to failures, creating another this test which is identical to # `testInvalidPyInput` except that the input is a tensor here as opposed # to a scalar. def testInvalidTensorInput(self): boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]] scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]] max_output_size_per_class = 5 max_total_size = ops.convert_to_tensor(2**31) with self.assertRaisesRegex( (TypeError, ValueError), "type int64 that does not match expected type of int32|" "Tensor conversion requested dtype int32 for Tensor with dtype int64"): image_ops.combined_non_max_suppression( boxes=boxes_np, scores=scores_np, max_output_size_per_class=max_output_size_per_class, max_total_size=max_total_size) def testLargeMaxOutputSizePerClass(self): # Ensure the max_output_size_per_class doesn't result in overflows. boxes = [[[ [0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101], ]]] scores = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]] nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = ( image_ops.combined_non_max_suppression( boxes=boxes, scores=scores, max_output_size_per_class=2**31 - 1, max_total_size=8, pad_per_class=True, clip_boxes=False, ) ) self.assertAllClose( nmsed_boxes, [[ [0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1.0, 1.1], [0, -0.1, 1, 0.9], [0, 10.1, 1, 11.1], [0, 100, 1, 101], [0, 0, 0, 0], [0, 0, 0, 0], ]], ) self.assertAllClose(nmsed_classes, [[3, 0, 1, 2, 4, 5, 0, 0]]) self.assertAllClose( nmsed_scores, [[0.95, 0.9, 0.75, 0.6, 0.5, 0.3, 0.0, 0.0]] ) self.assertAllClose(valid_detections, [6])
CombinedNonMaxSuppressionTest
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF031.py
{ "start": 756, "end": 908 }
class ____(dict[str, int]):
    pass


# Skip tuples of length one that are single-starred expressions
# https://github.com/astral-sh/ruff/issues/16077
d[*x]
Foo
python
getsentry__sentry
src/sentry/hybridcloud/rpc/pagination.py
{ "start": 2428, "end": 3154 }
class ____(RpcModel):
    ids: list[int] = Field(default_factory=list)
    hits: int | None = None
    max_hits: int | None = None
    next: RpcCursorState = Field(default_factory=lambda: RpcCursorState())
    prev: RpcCursorState = Field(default_factory=lambda: RpcCursorState())

    @classmethod
    def from_cursor_result(cls, cursor_result: CursorResult[Any]) -> "RpcPaginationResult":
        return RpcPaginationResult(
            ids=[row["id"] for row in cursor_result.results],
            hits=cursor_result.hits,
            max_hits=cursor_result.max_hits,
            next=RpcCursorState.from_cursor(cursor_result.next),
            prev=RpcCursorState.from_cursor(cursor_result.prev),
        )
RpcPaginationResult
python
run-llama__llama_index
llama-index-core/llama_index/core/ingestion/pipeline.py
{ "start": 6013, "end": 30444 }
class ____(BaseModel): """ An ingestion pipeline that can be applied to data. Args: name (str, optional): Unique name of the ingestion pipeline. Defaults to DEFAULT_PIPELINE_NAME. project_name (str, optional): Unique name of the project. Defaults to DEFAULT_PROJECT_NAME. transformations (List[TransformComponent], optional): Transformations to apply to the data. Defaults to None. documents (Optional[Sequence[Document]], optional): Documents to ingest. Defaults to None. readers (Optional[List[ReaderConfig]], optional): Reader to use to read the data. Defaults to None. vector_store (Optional[BasePydanticVectorStore], optional): Vector store to use to store the data. Defaults to None. cache (Optional[IngestionCache], optional): Cache to use to store the data. Defaults to None. docstore (Optional[BaseDocumentStore], optional): Document store to use for de-duping with a vector store. Defaults to None. docstore_strategy (DocstoreStrategy, optional): Document de-dup strategy. Defaults to DocstoreStrategy.UPSERTS. disable_cache (bool, optional): Disable the cache. Defaults to False. base_url (str, optional): Base URL for the LlamaCloud API. Defaults to DEFAULT_BASE_URL. app_url (str, optional): Base URL for the LlamaCloud app. Defaults to DEFAULT_APP_URL. api_key (Optional[str], optional): LlamaCloud API key. Defaults to None. Examples: ```python from llama_index.core.ingestion import IngestionPipeline from llama_index.core.node_parser import SentenceSplitter from llama_index.embeddings.openai import OpenAIEmbedding pipeline = IngestionPipeline( transformations=[ SentenceSplitter(chunk_size=512, chunk_overlap=20), OpenAIEmbedding(), ], ) nodes = pipeline.run(documents=documents) ``` """ model_config = ConfigDict(arbitrary_types_allowed=True) name: str = Field( default=DEFAULT_PIPELINE_NAME, description="Unique name of the ingestion pipeline", ) project_name: str = Field( default=DEFAULT_PROJECT_NAME, description="Unique name of the project" ) transformations: List[TransformComponent] = Field( description="Transformations to apply to the data" ) documents: Optional[Sequence[Document]] = Field(description="Documents to ingest") readers: Optional[List[ReaderConfig]] = Field( description="Reader to use to read the data" ) vector_store: Optional[BasePydanticVectorStore] = Field( description="Vector store to use to store the data" ) cache: IngestionCache = Field( default_factory=IngestionCache, description="Cache to use to store the data", ) docstore: Optional[BaseDocumentStore] = Field( default=None, description="Document store to use for de-duping with a vector store.", ) docstore_strategy: DocstoreStrategy = Field( default=DocstoreStrategy.UPSERTS, description="Document de-dup strategy." 
) disable_cache: bool = Field(default=False, description="Disable the cache") def __init__( self, name: str = DEFAULT_PIPELINE_NAME, project_name: str = DEFAULT_PROJECT_NAME, transformations: Optional[List[TransformComponent]] = None, readers: Optional[List[ReaderConfig]] = None, documents: Optional[Sequence[Document]] = None, vector_store: Optional[BasePydanticVectorStore] = None, cache: Optional[IngestionCache] = None, docstore: Optional[BaseDocumentStore] = None, docstore_strategy: DocstoreStrategy = DocstoreStrategy.UPSERTS, disable_cache: bool = False, ) -> None: if transformations is None: transformations = self._get_default_transformations() super().__init__( name=name, project_name=project_name, transformations=transformations, readers=readers, documents=documents, vector_store=vector_store, cache=cache or IngestionCache(), docstore=docstore, docstore_strategy=docstore_strategy, disable_cache=disable_cache, ) def persist( self, persist_dir: str = "./pipeline_storage", fs: Optional[AbstractFileSystem] = None, cache_name: str = DEFAULT_CACHE_NAME, docstore_name: str = DOCSTORE_FNAME, ) -> None: """Persist the pipeline to disk.""" if fs is not None: persist_dir = str(persist_dir) # NOTE: doesn't support Windows here docstore_path = concat_dirs(persist_dir, docstore_name) cache_path = concat_dirs(persist_dir, cache_name) else: persist_path = Path(persist_dir) docstore_path = str(persist_path / docstore_name) cache_path = str(persist_path / cache_name) self.cache.persist(cache_path, fs=fs) if self.docstore is not None: self.docstore.persist(docstore_path, fs=fs) def load( self, persist_dir: str = "./pipeline_storage", fs: Optional[AbstractFileSystem] = None, cache_name: str = DEFAULT_CACHE_NAME, docstore_name: str = DOCSTORE_FNAME, ) -> None: """Load the pipeline from disk.""" if fs is not None: self.cache = IngestionCache.from_persist_path( concat_dirs(persist_dir, cache_name), fs=fs ) persist_docstore_path = concat_dirs(persist_dir, docstore_name) if fs.exists(persist_docstore_path): self.docstore = SimpleDocumentStore.from_persist_path( concat_dirs(persist_dir, docstore_name), fs=fs ) else: self.cache = IngestionCache.from_persist_path( str(Path(persist_dir) / cache_name) ) persist_docstore_path = str(Path(persist_dir) / docstore_name) if os.path.exists(persist_docstore_path): self.docstore = SimpleDocumentStore.from_persist_path( str(Path(persist_dir) / docstore_name) ) def _get_default_transformations(self) -> List[TransformComponent]: return [ SentenceSplitter(), Settings.embed_model, ] def _prepare_inputs( self, documents: Optional[Sequence[Document]], nodes: Optional[Sequence[BaseNode]], ) -> Sequence[BaseNode]: input_nodes: Sequence[BaseNode] = [] if documents is not None: input_nodes += documents # type: ignore if nodes is not None: input_nodes += nodes # type: ignore if self.documents is not None: input_nodes += self.documents # type: ignore if self.readers is not None: for reader in self.readers: input_nodes += reader.read() # type: ignore return input_nodes def _handle_duplicates( self, nodes: Sequence[BaseNode], ) -> Sequence[BaseNode]: """Handle docstore duplicates by checking all hashes.""" assert self.docstore is not None existing_hashes = self.docstore.get_all_document_hashes() current_hashes = [] nodes_to_run = [] for node in nodes: if node.hash not in existing_hashes and node.hash not in current_hashes: self.docstore.set_document_hash(node.id_, node.hash) nodes_to_run.append(node) current_hashes.append(node.hash) return nodes_to_run def _handle_upserts( self, nodes: 
Sequence[BaseNode], ) -> Sequence[BaseNode]: """Handle docstore upserts by checking hashes and ids.""" assert self.docstore is not None doc_ids_from_nodes = set() deduped_nodes_to_run = {} for node in nodes: ref_doc_id = node.ref_doc_id if node.ref_doc_id else node.id_ doc_ids_from_nodes.add(ref_doc_id) existing_hash = self.docstore.get_document_hash(ref_doc_id) if not existing_hash: # document doesn't exist, so add it deduped_nodes_to_run[ref_doc_id] = node elif existing_hash and existing_hash != node.hash: self.docstore.delete_ref_doc(ref_doc_id, raise_error=False) if self.vector_store is not None: self.vector_store.delete(ref_doc_id) deduped_nodes_to_run[ref_doc_id] = node else: continue # document exists and is unchanged, so skip it if self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE: # Identify missing docs and delete them from docstore and vector store existing_doc_ids_before = set( self.docstore.get_all_document_hashes().values() ) doc_ids_to_delete = existing_doc_ids_before - doc_ids_from_nodes for ref_doc_id in doc_ids_to_delete: self.docstore.delete_document(ref_doc_id) if self.vector_store is not None: self.vector_store.delete(ref_doc_id) return list(deduped_nodes_to_run.values()) @staticmethod def _node_batcher( num_batches: int, nodes: Union[Sequence[BaseNode], List[Document]] ) -> Generator[Union[Sequence[BaseNode], List[Document]], Any, Any]: """Yield successive n-sized chunks from lst.""" batch_size = max(1, int(len(nodes) / num_batches)) for i in range(0, len(nodes), batch_size): yield nodes[i : i + batch_size] def _update_docstore( self, nodes: Sequence[BaseNode], store_doc_text: bool = True ) -> None: """Update the document store with the given nodes.""" assert self.docstore is not None if self.docstore_strategy in ( DocstoreStrategy.UPSERTS, DocstoreStrategy.UPSERTS_AND_DELETE, ): self.docstore.set_document_hashes({n.id_: n.hash for n in nodes}) self.docstore.add_documents(nodes, store_text=store_doc_text) elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY: self.docstore.add_documents(nodes, store_text=store_doc_text) else: raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}") @dispatcher.span def run( self, show_progress: bool = False, documents: Optional[List[Document]] = None, nodes: Optional[Sequence[BaseNode]] = None, cache_collection: Optional[str] = None, in_place: bool = True, store_doc_text: bool = True, num_workers: Optional[int] = None, **kwargs: Any, ) -> Sequence[BaseNode]: """ Run a series of transformations on a set of nodes. If a vector store is provided, nodes with embeddings will be added to the vector store. If a vector store + docstore are provided, the docstore will be used to de-duplicate documents. Args: show_progress (bool, optional): Shows execution progress bar(s). Defaults to False. documents (Optional[List[Document]], optional): Set of documents to be transformed. Defaults to None. nodes (Optional[Sequence[BaseNode]], optional): Set of nodes to be transformed. Defaults to None. cache_collection (Optional[str], optional): Cache for transformations. Defaults to None. in_place (bool, optional): Whether transformations creates a new list for transformed nodes or modifies the array passed to `run_transformations`. Defaults to True. num_workers (Optional[int], optional): The number of parallel processes to use. If set to None, then sequential compute is used. Defaults to None. 
Returns: Sequence[BaseNode]: The set of transformed Nodes/Documents """ input_nodes = self._prepare_inputs(documents, nodes) # check if we need to dedup if self.docstore is not None and self.vector_store is not None: if self.docstore_strategy in ( DocstoreStrategy.UPSERTS, DocstoreStrategy.UPSERTS_AND_DELETE, ): nodes_to_run = self._handle_upserts(input_nodes) elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY: nodes_to_run = self._handle_duplicates(input_nodes) else: raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}") elif self.docstore is not None and self.vector_store is None: if self.docstore_strategy == DocstoreStrategy.UPSERTS: print( "Docstore strategy set to upserts, but no vector store. " "Switching to duplicates_only strategy." ) self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY elif self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE: print( "Docstore strategy set to upserts and delete, but no vector store. " "Switching to duplicates_only strategy." ) self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY nodes_to_run = self._handle_duplicates(input_nodes) else: nodes_to_run = input_nodes if num_workers and num_workers > 1: num_cpus = multiprocessing.cpu_count() if num_workers > num_cpus: warnings.warn( "Specified num_workers exceed number of CPUs in the system. " "Setting `num_workers` down to the maximum CPU count." ) num_workers = num_cpus with multiprocessing.get_context("spawn").Pool(num_workers) as p: node_batches = self._node_batcher( num_batches=num_workers, nodes=nodes_to_run ) nodes_parallel = p.starmap( run_transformations, zip( node_batches, repeat(self.transformations), repeat(in_place), repeat(self.cache if not self.disable_cache else None), repeat(cache_collection), ), ) nodes = reduce(lambda x, y: x + y, nodes_parallel, []) # type: ignore else: nodes = run_transformations( nodes_to_run, self.transformations, show_progress=show_progress, cache=self.cache if not self.disable_cache else None, cache_collection=cache_collection, in_place=in_place, **kwargs, ) nodes = nodes or [] if self.vector_store is not None: nodes_with_embeddings = [n for n in nodes if n.embedding is not None] if nodes_with_embeddings: self.vector_store.add(nodes_with_embeddings) if self.docstore is not None: self._update_docstore(nodes_to_run, store_doc_text=store_doc_text) return nodes # ------ async methods ------ async def _aupdate_docstore( self, nodes: Sequence[BaseNode], store_doc_text: bool = True ) -> None: """Update the document store with the given nodes.""" assert self.docstore is not None if self.docstore_strategy in ( DocstoreStrategy.UPSERTS, DocstoreStrategy.UPSERTS_AND_DELETE, ): await self.docstore.aset_document_hashes({n.id_: n.hash for n in nodes}) await self.docstore.async_add_documents(nodes, store_text=store_doc_text) elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY: await self.docstore.async_add_documents(nodes, store_text=store_doc_text) else: raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}") async def _ahandle_duplicates( self, nodes: Sequence[BaseNode], store_doc_text: bool = True, ) -> Sequence[BaseNode]: """Handle docstore duplicates by checking all hashes.""" assert self.docstore is not None existing_hashes = await self.docstore.aget_all_document_hashes() current_hashes = [] nodes_to_run = [] for node in nodes: if node.hash not in existing_hashes and node.hash not in current_hashes: await self.docstore.aset_document_hash(node.id_, node.hash) nodes_to_run.append(node) 
current_hashes.append(node.hash) return nodes_to_run async def _ahandle_upserts( self, nodes: Sequence[BaseNode], store_doc_text: bool = True, ) -> Sequence[BaseNode]: """Handle docstore upserts by checking hashes and ids.""" assert self.docstore is not None doc_ids_from_nodes = set() deduped_nodes_to_run = {} for node in nodes: ref_doc_id = node.ref_doc_id if node.ref_doc_id else node.id_ doc_ids_from_nodes.add(ref_doc_id) existing_hash = await self.docstore.aget_document_hash(ref_doc_id) if not existing_hash: # document doesn't exist, so add it deduped_nodes_to_run[ref_doc_id] = node elif existing_hash and existing_hash != node.hash: await self.docstore.adelete_ref_doc(ref_doc_id, raise_error=False) if self.vector_store is not None: await self.vector_store.adelete(ref_doc_id) deduped_nodes_to_run[ref_doc_id] = node else: continue # document exists and is unchanged, so skip it if self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE: # Identify missing docs and delete them from docstore and vector store existing_doc_ids_before = set( (await self.docstore.aget_all_document_hashes()).values() ) doc_ids_to_delete = existing_doc_ids_before - doc_ids_from_nodes for ref_doc_id in doc_ids_to_delete: await self.docstore.adelete_document(ref_doc_id) if self.vector_store is not None: await self.vector_store.adelete(ref_doc_id) return list(deduped_nodes_to_run.values()) @dispatcher.span async def arun( self, show_progress: bool = False, documents: Optional[List[Document]] = None, nodes: Optional[Sequence[BaseNode]] = None, cache_collection: Optional[str] = None, in_place: bool = True, store_doc_text: bool = True, num_workers: Optional[int] = None, **kwargs: Any, ) -> Sequence[BaseNode]: """ Run a series of transformations on a set of nodes. If a vector store is provided, nodes with embeddings will be added to the vector store. If a vector store + docstore are provided, the docstore will be used to de-duplicate documents. Args: show_progress (bool, optional): Shows execution progress bar(s). Defaults to False. documents (Optional[List[Document]], optional): Set of documents to be transformed. Defaults to None. nodes (Optional[Sequence[BaseNode]], optional): Set of nodes to be transformed. Defaults to None. cache_collection (Optional[str], optional): Cache for transformations. Defaults to None. in_place (bool, optional): Whether transformations creates a new list for transformed nodes or modifies the array passed to `run_transformations`. Defaults to True. num_workers (Optional[int], optional): The number of parallel processes to use. If set to None, then sequential compute is used. Defaults to None. Returns: Sequence[BaseNode]: The set of transformed Nodes/Documents """ input_nodes = self._prepare_inputs(documents, nodes) # check if we need to dedup if self.docstore is not None and self.vector_store is not None: if self.docstore_strategy in ( DocstoreStrategy.UPSERTS, DocstoreStrategy.UPSERTS_AND_DELETE, ): nodes_to_run = await self._ahandle_upserts( input_nodes, store_doc_text=store_doc_text ) elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY: nodes_to_run = await self._ahandle_duplicates( input_nodes, store_doc_text=store_doc_text ) else: raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}") elif self.docstore is not None and self.vector_store is None: if self.docstore_strategy == DocstoreStrategy.UPSERTS: print( "Docstore strategy set to upserts, but no vector store. " "Switching to duplicates_only strategy." 
) self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY elif self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE: print( "Docstore strategy set to upserts and delete, but no vector store. " "Switching to duplicates_only strategy." ) self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY nodes_to_run = await self._ahandle_duplicates( input_nodes, store_doc_text=store_doc_text ) else: nodes_to_run = input_nodes if num_workers and num_workers > 1: num_cpus = multiprocessing.cpu_count() if num_workers > num_cpus: warnings.warn( "Specified num_workers exceed number of CPUs in the system. " "Setting `num_workers` down to the maximum CPU count." ) num_workers = num_cpus loop = asyncio.get_event_loop() with ProcessPoolExecutor(max_workers=num_workers) as p: node_batches = self._node_batcher( num_batches=num_workers, nodes=nodes_to_run ) tasks = [ loop.run_in_executor( p, partial( arun_transformations_wrapper, transformations=self.transformations, in_place=in_place, cache=self.cache if not self.disable_cache else None, cache_collection=cache_collection, ), batch, ) for batch in node_batches ] result: Sequence[Sequence[BaseNode]] = await asyncio.gather(*tasks) nodes: Sequence[BaseNode] = reduce(lambda x, y: x + y, result, []) # type: ignore else: nodes = await arun_transformations( # type: ignore nodes_to_run, self.transformations, show_progress=show_progress, cache=self.cache if not self.disable_cache else None, cache_collection=cache_collection, in_place=in_place, **kwargs, ) nodes = nodes nodes = nodes or [] if self.vector_store is not None: nodes_with_embeddings = [n for n in nodes if n.embedding is not None] if nodes_with_embeddings: await self.vector_store.async_add(nodes_with_embeddings) if self.docstore is not None: await self._aupdate_docstore(nodes_to_run, store_doc_text=store_doc_text) return nodes
IngestionPipeline
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 43795, "end": 45593 }
class ____(ASTExpression): def __init__(self, exprs: list[ASTExpression], ops: list[str]) -> None: assert len(exprs) > 0 assert len(exprs) == len(ops) + 1 self.exprs = exprs self.ops = ops def __eq__(self, other: object) -> bool: if not isinstance(other, ASTBinOpExpr): return NotImplemented return self.exprs == other.exprs and self.ops == other.ops def __hash__(self) -> int: return hash((self.exprs, self.ops)) def _stringify(self, transform: StringifyTransform) -> str: res = [transform(self.exprs[0])] for i in range(1, len(self.exprs)): res.extend(( ' ', self.ops[i - 1], ' ', transform(self.exprs[i]), )) return ''.join(res) def get_id(self, version: int) -> str: assert version >= 2 res: list[str] = [] for i in range(len(self.ops)): res.extend((_id_operator_v2[self.ops[i]], self.exprs[i].get_id(version))) res.append(self.exprs[-1].get_id(version)) return ''.join(res) def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: self.exprs[0].describe_signature(signode, mode, env, symbol) for i in range(1, len(self.exprs)): signode += addnodes.desc_sig_space() op = self.ops[i - 1] if ord(op[0]) >= ord('a') and ord(op[0]) <= ord('z'): signode += addnodes.desc_sig_keyword(op, op) else: signode += addnodes.desc_sig_operator(op, op) signode += addnodes.desc_sig_space() self.exprs[i].describe_signature(signode, mode, env, symbol)
ASTBinOpExpr
python
huggingface__transformers
tests/models/musicgen/test_modeling_musicgen.py
{ "start": 52162, "end": 55393 }
class ____(unittest.TestCase): @cached_property def model(self): return MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-stereo-small").to(torch_device) @cached_property def processor(self): return MusicgenProcessor.from_pretrained("facebook/musicgen-stereo-small") @slow def test_generate_unconditional_greedy(self): model = self.model # only generate 1 sample with greedy - since it's deterministic all elements of the batch will be the same unconditional_inputs = model.get_unconditional_inputs(num_samples=1) unconditional_inputs = place_dict_on_device(unconditional_inputs, device=torch_device) output_values = model.generate(**unconditional_inputs, do_sample=False, max_new_tokens=12) # fmt: off EXPECTED_VALUES_LEFT = torch.tensor( [ 0.0017, 0.0004, 0.0004, 0.0005, 0.0002, 0.0002, -0.0002, -0.0013, -0.0010, -0.0015, -0.0018, -0.0032, -0.0060, -0.0082, -0.0096, -0.0099, ] ) EXPECTED_VALUES_RIGHT = torch.tensor( [ 0.0038, 0.0028, 0.0031, 0.0032, 0.0031, 0.0032, 0.0030, 0.0019, 0.0021, 0.0015, 0.0009, -0.0008, -0.0040, -0.0067, -0.0087, -0.0096, ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (1, 2, 5760)) torch.testing.assert_close(output_values[0, 0, :16].cpu(), EXPECTED_VALUES_LEFT, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output_values[0, 1, :16].cpu(), EXPECTED_VALUES_RIGHT, rtol=1e-4, atol=1e-4) @slow def test_generate_text_audio_prompt(self): model = self.model processor = self.processor # create stereo inputs audio = [get_bip_bip(duration=0.5)[None, :].repeat(2, 0), get_bip_bip(duration=1.0)[None, :].repeat(2, 0)] text = ["80s music", "Club techno"] inputs = processor(audio=audio, text=text, padding=True, return_tensors="pt") inputs = place_dict_on_device(inputs, device=torch_device) output_values = model.generate(**inputs, do_sample=False, guidance_scale=3.0, max_new_tokens=12) # fmt: off EXPECTED_VALUES_LEFT = torch.tensor( [ 0.2535, 0.2008, 0.1471, 0.0896, 0.0306, -0.0200, -0.0501, -0.0728, -0.0832, -0.0856, -0.0867, -0.0884, -0.0864, -0.0866, -0.0744, -0.0430, ] ) EXPECTED_VALUES_RIGHT = torch.tensor( [ 0.1695, 0.1213, 0.0732, 0.0239, -0.0264, -0.0705, -0.0935, -0.1103, -0.1163, -0.1139, -0.1104, -0.1082, -0.1027, -0.1004, -0.0900, -0.0614, ] ) # fmt: on # (bsz, channels, seq_len) self.assertTrue(output_values.shape == (2, 2, 37760)) # input values take shape 32000 and we generate from there - we check the last (generated) values torch.testing.assert_close(output_values[0, 0, -16:].cpu(), EXPECTED_VALUES_LEFT, rtol=2e-4, atol=2e-4) torch.testing.assert_close(output_values[0, 1, -16:].cpu(), EXPECTED_VALUES_RIGHT, rtol=2e-4, atol=2e-4)
MusicgenStereoIntegrationTests
python
pytest-dev__pytest
testing/test_doctest.py
{ "start": 48698, "end": 50931 }
class ____:
    def __getattr__(self, _):
        raise KeyError("This should be an AttributeError")


@pytest.mark.parametrize(  # pragma: no branch (lambdas are not called)
    "stop",
    [
        None,
        pytest.param(_is_mocked, id="is_mocked"),
        pytest.param(lambda f: None, id="lambda_none"),
        pytest.param(lambda f: False, id="lambda_false"),
        pytest.param(lambda f: True, id="lambda_true"),
    ],
)
def test_warning_on_unwrap_of_broken_object(
    stop: Callable[[object], object] | None,
) -> None:
    bad_instance = Broken()
    assert inspect.unwrap.__module__ == "inspect"
    with _patch_unwrap_mock_aware():
        assert inspect.unwrap.__module__ != "inspect"
        with pytest.warns(
            pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
        ):
            with pytest.raises(KeyError):
                inspect.unwrap(bad_instance, stop=stop)  # type: ignore[arg-type]
    assert inspect.unwrap.__module__ == "inspect"


def test_is_setup_py_not_named_setup_py(tmp_path: Path) -> None:
    not_setup_py = tmp_path.joinpath("not_setup.py")
    not_setup_py.write_text(
        'from setuptools import setup; setup(name="foo")', encoding="utf-8"
    )
    assert not _is_setup_py(not_setup_py)


@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_is_a_setup_py(tmp_path: Path, mod: str) -> None:
    setup_py = tmp_path.joinpath("setup.py")
    setup_py.write_text(f'from {mod} import setup; setup(name="foo")', "utf-8")
    assert _is_setup_py(setup_py)


@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_different_encoding(tmp_path: Path, mod: str) -> None:
    setup_py = tmp_path.joinpath("setup.py")
    contents = (
        "# -*- coding: cp1252 -*-\n"
        f'from {mod} import setup; setup(name="foo", description="€")\n'
    )
    setup_py.write_bytes(contents.encode("cp1252"))
    assert _is_setup_py(setup_py)


@pytest.mark.parametrize(
    "name, expected", [("__main__.py", True), ("__init__.py", False)]
)
def test_is_main_py(tmp_path: Path, name: str, expected: bool) -> None:
    dunder_main = tmp_path.joinpath(name)
    assert _is_main_py(dunder_main) == expected
Broken